From b7765a96503df676d2e2a21d45abdc547a454464 Mon Sep 17 00:00:00 2001 From: Patrycja Kalinska Date: Wed, 11 Mar 2026 15:32:43 +0100 Subject: [PATCH] Optimize LLM markdown --- .../docs/fundamentals/introduction.mdx | 1 - packages/audiodocs/package.json | 2 +- .../audiodocs/src/components/Hero/index.tsx | 2 +- packages/audiodocs/static/llms-full.txt | 6523 ----------------- packages/audiodocs/static/llms.txt | 97 - .../static/raw/analysis/analyser-node.md | 118 - .../static/raw/core/audio-context.md | 48 - .../audiodocs/static/raw/core/audio-node.md | 127 - .../audiodocs/static/raw/core/audio-param.md | 153 - .../static/raw/core/base-audio-context.md | 315 - .../static/raw/core/offline-audio-context.md | 48 - .../destinations/audio-destination-node.md | 22 - .../static/raw/effects/biquad-filter-node.md | 86 - .../static/raw/effects/convolver-node.md | 41 - .../static/raw/effects/delay-node.md | 43 - .../audiodocs/static/raw/effects/gain-node.md | 73 - .../static/raw/effects/iir-filter-node.md | 45 - .../static/raw/effects/periodic-wave.md | 37 - .../static/raw/effects/stereo-panner-node.md | 42 - .../static/raw/effects/wave-shaper-node.md | 60 - .../static/raw/fundamentals/best-practices.md | 24 - .../raw/fundamentals/getting-started.md | 157 - .../static/raw/fundamentals/introduction.md | 25 - .../raw/guides/create-your-own-effect.md | 249 - .../static/raw/guides/lets-make-some-noise.md | 102 - .../raw/guides/making-a-piano-keyboard.md | 359 - .../static/raw/guides/noise-generation.md | 119 - .../static/raw/guides/see-your-sound.md | 255 - .../static/raw/inputs/audio-recorder.md | 726 -- .../static/raw/other/audio-api-plugin.md | 137 - .../static/raw/other/compatibility.md | 31 - .../audiodocs/static/raw/other/ffmpeg-info.md | 33 - .../static/raw/other/non-expo-permissions.md | 24 - .../raw/other/running_with_mac_catalyst.md | 165 - .../audiodocs/static/raw/other/testing.md | 361 - .../raw/other/web-audio-api-coverage.md | 53 - .../static/raw/react/select-input.md 
| 62 - .../sources/audio-buffer-base-source-node.md | 90 - .../sources/audio-buffer-queue-source-node.md | 130 - .../raw/sources/audio-buffer-source-node.md | 141 - .../static/raw/sources/audio-buffer.md | 96 - .../sources/audio-scheduled-source-node.md | 71 - .../raw/sources/constant-source-node.md | 81 - .../static/raw/sources/oscillator-node.md | 95 - .../raw/sources/recorder-adapter-node.md | 43 - .../static/raw/sources/streamer-node.md | 59 - .../static/raw/system/audio-manager.md | 295 - .../system/playback-notification-manager.md | 182 - .../system/recording-notification-manager.md | 108 - .../static/raw/types/channel-count-mode.md | 17 - .../raw/types/channel-interpretation.md | 39 - .../static/raw/types/oscillator-type.md | 19 - .../audiodocs/static/raw/utils/decoding.md | 92 - .../static/raw/utils/time-stretching.md | 28 - .../static/raw/worklets/introduction.md | 71 - .../static/raw/worklets/worklet-node.md | 84 - .../raw/worklets/worklet-processing-node.md | 160 - .../raw/worklets/worklet-source-node.md | 162 - packages/audiodocs/yarn.lock | 8 +- 59 files changed, 6 insertions(+), 12830 deletions(-) delete mode 100644 packages/audiodocs/static/llms-full.txt delete mode 100644 packages/audiodocs/static/llms.txt delete mode 100644 packages/audiodocs/static/raw/analysis/analyser-node.md delete mode 100644 packages/audiodocs/static/raw/core/audio-context.md delete mode 100644 packages/audiodocs/static/raw/core/audio-node.md delete mode 100644 packages/audiodocs/static/raw/core/audio-param.md delete mode 100644 packages/audiodocs/static/raw/core/base-audio-context.md delete mode 100644 packages/audiodocs/static/raw/core/offline-audio-context.md delete mode 100644 packages/audiodocs/static/raw/destinations/audio-destination-node.md delete mode 100644 packages/audiodocs/static/raw/effects/biquad-filter-node.md delete mode 100644 packages/audiodocs/static/raw/effects/convolver-node.md delete mode 100644 packages/audiodocs/static/raw/effects/delay-node.md delete 
mode 100644 packages/audiodocs/static/raw/effects/gain-node.md delete mode 100644 packages/audiodocs/static/raw/effects/iir-filter-node.md delete mode 100644 packages/audiodocs/static/raw/effects/periodic-wave.md delete mode 100644 packages/audiodocs/static/raw/effects/stereo-panner-node.md delete mode 100644 packages/audiodocs/static/raw/effects/wave-shaper-node.md delete mode 100644 packages/audiodocs/static/raw/fundamentals/best-practices.md delete mode 100644 packages/audiodocs/static/raw/fundamentals/getting-started.md delete mode 100644 packages/audiodocs/static/raw/fundamentals/introduction.md delete mode 100644 packages/audiodocs/static/raw/guides/create-your-own-effect.md delete mode 100644 packages/audiodocs/static/raw/guides/lets-make-some-noise.md delete mode 100644 packages/audiodocs/static/raw/guides/making-a-piano-keyboard.md delete mode 100644 packages/audiodocs/static/raw/guides/noise-generation.md delete mode 100644 packages/audiodocs/static/raw/guides/see-your-sound.md delete mode 100644 packages/audiodocs/static/raw/inputs/audio-recorder.md delete mode 100644 packages/audiodocs/static/raw/other/audio-api-plugin.md delete mode 100644 packages/audiodocs/static/raw/other/compatibility.md delete mode 100644 packages/audiodocs/static/raw/other/ffmpeg-info.md delete mode 100644 packages/audiodocs/static/raw/other/non-expo-permissions.md delete mode 100644 packages/audiodocs/static/raw/other/running_with_mac_catalyst.md delete mode 100644 packages/audiodocs/static/raw/other/testing.md delete mode 100644 packages/audiodocs/static/raw/other/web-audio-api-coverage.md delete mode 100644 packages/audiodocs/static/raw/react/select-input.md delete mode 100644 packages/audiodocs/static/raw/sources/audio-buffer-base-source-node.md delete mode 100644 packages/audiodocs/static/raw/sources/audio-buffer-queue-source-node.md delete mode 100644 packages/audiodocs/static/raw/sources/audio-buffer-source-node.md delete mode 100644 
packages/audiodocs/static/raw/sources/audio-buffer.md delete mode 100644 packages/audiodocs/static/raw/sources/audio-scheduled-source-node.md delete mode 100644 packages/audiodocs/static/raw/sources/constant-source-node.md delete mode 100644 packages/audiodocs/static/raw/sources/oscillator-node.md delete mode 100644 packages/audiodocs/static/raw/sources/recorder-adapter-node.md delete mode 100644 packages/audiodocs/static/raw/sources/streamer-node.md delete mode 100644 packages/audiodocs/static/raw/system/audio-manager.md delete mode 100644 packages/audiodocs/static/raw/system/playback-notification-manager.md delete mode 100644 packages/audiodocs/static/raw/system/recording-notification-manager.md delete mode 100644 packages/audiodocs/static/raw/types/channel-count-mode.md delete mode 100644 packages/audiodocs/static/raw/types/channel-interpretation.md delete mode 100644 packages/audiodocs/static/raw/types/oscillator-type.md delete mode 100644 packages/audiodocs/static/raw/utils/decoding.md delete mode 100644 packages/audiodocs/static/raw/utils/time-stretching.md delete mode 100644 packages/audiodocs/static/raw/worklets/introduction.md delete mode 100644 packages/audiodocs/static/raw/worklets/worklet-node.md delete mode 100644 packages/audiodocs/static/raw/worklets/worklet-processing-node.md delete mode 100644 packages/audiodocs/static/raw/worklets/worklet-source-node.md diff --git a/packages/audiodocs/docs/fundamentals/introduction.mdx b/packages/audiodocs/docs/fundamentals/introduction.mdx index ff5436f67..10707adcc 100644 --- a/packages/audiodocs/docs/fundamentals/introduction.mdx +++ b/packages/audiodocs/docs/fundamentals/introduction.mdx @@ -1,5 +1,4 @@ --- -slug: / sidebar_position: 1 --- diff --git a/packages/audiodocs/package.json b/packages/audiodocs/package.json index 7b84161d5..d39d8f4cd 100644 --- a/packages/audiodocs/package.json +++ b/packages/audiodocs/package.json @@ -33,7 +33,7 @@ "@mui/material": "^7.1.0", "@react-native-community/slider": 
"^5.0.1", "@shopify/react-native-skia": "1.10.2", - "@swmansion/t-rex-ui": "1.3.0", + "@swmansion/t-rex-ui": "1.3.1", "@types/three": "^0.180.0", "@vercel/og": "^0.6.2", "babel-polyfill": "^6.26.0", diff --git a/packages/audiodocs/src/components/Hero/index.tsx b/packages/audiodocs/src/components/Hero/index.tsx index dbb23b8ec..91a2609da 100644 --- a/packages/audiodocs/src/components/Hero/index.tsx +++ b/packages/audiodocs/src/components/Hero/index.tsx @@ -23,7 +23,7 @@ const Hero = () => {
diff --git a/packages/audiodocs/static/llms-full.txt b/packages/audiodocs/static/llms-full.txt deleted file mode 100644 index 89e821d15..000000000 --- a/packages/audiodocs/static/llms-full.txt +++ /dev/null @@ -1,6523 +0,0 @@ -# Documentation (Full) - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/analysis/analyser-node -# Title: analyser-node - -# AnalyserNode - -The `AnalyserNode` interface represents a node providing two core functionalities: extracting time-domain data and frequency-domain data from audio signals. -It is an [`AudioNode`](/docs/core/audio-node) that passes the audio data unchanged from input to output, but allows to take passed data and process it. - -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -#### Time domain vs Frequency domain - -![time-domain-vs-frequency-domain](/img/time_domain_vs_frequency_domain.jpg) - -A time-domain graph illustrates how a signal evolves over time, displaying changes in amplitude or intensity as time progresses. -In contrast, a frequency-domain graph reveals how the signal's energy or power is distributed across different frequency bands, highlighting the presence and strength of various frequency components over a specified range. 
- -## Constructor - -```tsx -constructor(context: BaseAudioContext, options?: AnalyserOptions) -``` - -### `AnalyserOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | | -| :---: | :---: | :----: | :---- | -| `fftSize` | `number` | 2048 | Number representing size of fast fourier transform | -| `minDecibels` | `number` | -100 | Initial minimum power in dB for FFT analysis | -| `maxDecibels` | `number` | -30 | Initial maximum power in dB for FFT analysis | -| `smoothingTimeConstant` | `number` | 0.8 | Initial smoothing constant for the FFT analysis | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createAnalyser()`](/docs/core/base-audio-context#createanalyser) that creates node with default values. - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | | -| :----: | :----: | :-------- | :-: | -| `fftSize` | `number` | Integer value representing size of [Fast Fourier Transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) used to determine frequency domain. In general it is size of returning time-domain data. | -| `minDecibels` | `number` | Float value representing the minimum value for the range of results from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata). | -| `maxDecibels` | `number` | Float value representing the maximum value for the range of results from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata). | -| `smoothingTimeConstant` | `number` | Float value representing averaging constant with the last analysis frame. In general the higher value the smoother is the transition between values over time. | -| `frequencyBinCount` | `number` | Integer value representing amount of the data obtained in frequency domain, half of the `fftSize` property. 
| | - -## Methods - -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -### `getFloatFrequencyData` - -Copies current frequency data into given array. -Each value in the array represents the decibel value for a specific frequency. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `array` | `Float32Array` | The array to which frequency data will be copied. | - -#### Returns `undefined`. - -### `getByteFrequencyData` - -Copies current frequency data into given array. -Each value in the array is within the range 0 to 255. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `array` | `Uint8Array` | The array to which frequency data will be copied. | - -#### Returns `undefined`. - -### `getFloatTimeDomainData` - -Copies current time-domain data into given array. -Each value in the array is the magnitude of the signal at a particular time. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `array` | `Float32Array` | The array to which time-domain data will be copied. | - -#### Returns `undefined`. - -### `getByteTimeDomainData` - -Copies current time-domain data into given array. -Each value in the array is within the range 0 to 255, where value of 127 indicates silence. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `array` | `Uint8Array` | The array to which time-domain data will be copied. | - -#### Returns `undefined`. - -## Remarks - -#### `fftSize` - -* Must be a power of 2 between 32 and 32768. -* Throws `IndexSizeError` if set value is not power of 2, or is outside the allowed range. - -#### `minDecibels` - -* 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that. -* When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude lower then `minDecibels` will be returned as 0. 
-* Throws `IndexSizeError` if set value is greater than or equal to `maxDecibels`. - -#### `maxDecibels` - -* 0 dB([decibel](https://en.wikipedia.org/wiki/Decibel)) is the loudest possible sound, -10 dB is a 10th of that. -* When getting data from [`getByteFrequencyData()`](/docs/analysis/analyser-node#getbytefrequencydata), any frequency with amplitude higher then `maxDecibels` will be returned as 255. -* Throws `IndexSizeError` if set value is less then or equal to `minDecibels`. - -#### `smoothingTimeConstant` - -* Nominal range is 0 to 1. -* 0 means no averaging, 1 means "overlap the previous and current buffer quite a lot while computing the value". -* Throws `IndexSizeError` if set value is outside the allowed range. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/core/audio-context -# Title: audio-context - -# AudioContext - -The `AudioContext` interface inherits from [`BaseAudioContext`](/docs/core/base-audio-context). -It is responsible for supervising and managing audio-processing graph. - -## Constructor - -`new AudioContext(options: AudioContextOptions)` - -```jsx -interface AudioContextOptions { - sampleRate: number; -} -``` - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `NotSupportedError` | `sampleRate` is outside the nominal range \[8000, 96000]. | - -## Properties - -`AudioContext` does not define any additional properties. -It inherits all properties from [`BaseAudioContext`](/docs/core/base-audio-context#properties). - -## Methods - -It inherits all methods from [`BaseAudioContext`](/docs/core/base-audio-context#methods). - -### `close` - -Closes the audio context, releasing any system audio resources that it uses. - -#### Returns `Promise`. - -### `suspend` - -Suspends time progression in the audio context. -It is useful when your application will not use audio for a while. - -#### Returns `Promise`. - -### `resume` - -Resumes a previously suspended audio context. - -#### Returns `Promise`. 
- - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/core/audio-node -# Title: audio-node - -# AudioNode - -The `AudioNode` interface serves as a versatile interface for constructing an audio processing graph, representing individual units of audio processing functionality. -Each `AudioNode` is associated with a certain number of audio channels that facilitate the transfer of audio data through processing graph. - -We usually represent the channels with the standard abbreviations detailed in the table below: - -| Name | Number of channels | Channels | -| :----: | :------: | :-------- | -| Mono | 1 | 0: M - mono | -| Stereo | 2 | 0: L - left 1: R - right | -| Quad | 4 | 0: L - left 1: R - right 2: SL - surround left 3: SR - surround right | -| Stereo | 6 | 0: L - left 1: R - right 2: C - center 3: LFE - subwoofer 4: SL - surround left 5: SR - surround right | - -#### Mixing - -When node has more then one input or number of inputs channels differs from output up-mixing or down-mixing must be conducted. -There are three properties involved in mixing process: `channelCount`, [`ChannelCountMode`](/docs/types/channel-count-mode), [`ChannelInterpretation`](/docs/types/channel-interpretation). -Based on them we can obtain output's number of channels and mixing strategy. - -## Properties - -| Name | Type | Description | | -| :----: | :----: | :-------- | :-: | -| `context` | [`BaseAudioContext`](/docs/core/base-audio-context) | Associated context. | | -| `numberOfInputs` | `number` | Integer value representing the number of input connections for the node. | | -| `numberOfOutputs` | `number` | Integer value representing the number of output connections for the node. | | -| `channelCount` | `number` | Integer used to determine how many channels are used when up-mixing or down-mixing node's inputs. 
| | -| `channelCountMode` | [`ChannelCountMode`](/docs/types/channel-count-mode) | Enumerated value that specifies the method by which channels are mixed between the node's inputs and outputs. | | -| `channelInterpretation` | [`ChannelInterpretation`](/docs/types/channel-interpretation) | Enumerated value that specifies how input channels are mapped to output channels when number of them is different. | | - -## Examples - -### Connecting node to node - -```tsx -import { OscillatorNode, GainNode, AudioContext } from 'react-native-audio-api'; - -function App() { - const audioContext = new AudioContext(); - const oscillatorNode = audioContext.createOscillator(); - const gainNode = audioContext.createGain(); - - gainNode.gain.value = 0.5; //lower volume to 0.5 - oscillatorNode.connect(gainNode); - gainNode.connect(audioContext.destination); - oscillatorNode.start(audioContext.currentTime); -} -``` - -### Connecting node to audio param (LFO-controlled parameter) - -```tsx -import { OscillatorNode, GainNode, AudioContext } from 'react-native-audio-api'; - -function App() { - const audioContext = new AudioContext(); - const oscillatorNode = audioContext.createOscillator(); - const lfo = audioContext.createOscillator(); - const gainNode = audioContext.createGain(); - - gainNode.gain.value = 0.5; //lower volume to 0.5 - lfo.frequency.value = 2; //low frequency oscillator with 2Hz - - // by default oscillator wave values ranges from -1 to 1 - // connecting lfo to gain param will cause the gain param to oscillate at 2Hz and its value will range from 0.5 - 1 to 0.5 + 1 - // you can modulate amplitude by connecting lfo to another gain that would be responsible for this value - lfo.connect(gainNode.gain) - - oscillatorNode.connect(gainNode); - gainNode.connect(audioContext.destination); - oscillatorNode.start(audioContext.currentTime); - lfo.start(audioContext.currentTime); -} -``` - -## Methods - -### `connect` - -Connects one of the node's outputs to a destination. 
- -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `destination` | [`AudioNode`](/docs/core/audio-node) or [`AudioParam`](/docs/core/audio-param) | `AudioNode` or `AudioParam` to which to connect. | - -#### Errors: - -| Error type | Description | -| :---: | :---- | -| `InvalidAccessError` | If `destination` is not part of the same audio context as the node. | - -#### Returns `undefined`. - -### `disconnect` - -Disconnects one or more nodes from the node. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `destination` | [`AudioNode`](/docs/core/audio-node) or [`AudioParam`](/docs/core/audio-param) | `AudioNode` or `AudioParam` from which to disconnect. | - -If no arguments provided node disconnects from all outgoing connections. - -#### Returns `undefined`. - -### `AudioNodeOptions` - -It is used to constructing majority of all `AudioNodes`. - -| Parameter | Type | Default | Description | -| :---: | :---: | :----: | :---- | -| `channelCount` | `number` | 2 | Indicates number of channels used in mixing of node. | -| `channelCountMode` | [`ChannelCountMode`](/docs/types/channel-count-mode) | `max` | Determines how the number of input channels affects the number of output channels in an audio node. | -| `channelInterpretation` | [`ChannelInterpretation`](/docs/types/channel-interpretation) | `speakers` | Specifies how input channels are mapped out to output channels when the number of them are different. | - -If any of these values are not provided, default values are used. - -## Remarks - -#### `numberOfInputs` - -* Source nodes are characterized by having a `numberOfInputs` value of 0. - -#### `numberOfOutputs` - -* Destination nodes are characterized by having a `numberOfOutputs` value of 0. 
- - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/core/audio-param -# Title: audio-param - -# AudioParam - -The `AudioParam` interface represents audio-related parameter (such as `gain` property of [GainNode\`](/docs/effects/gain-node)). -It can be set to specific value or schedule value change to happen at specific time, and following specific pattern. - -#### a-rate vs k-rate - -* `a-rate` - takes the current audio parameter value for each sample frame of the audio signal. -* `k-rate` - uses the same initial audio parameter value for the whole block processed. - -## Properties - -| Name | Type | Description | | -| :----: | :----: | :-------- | :-: | -| `defaultValue` | `number` | Initial value of the parameter. | | -| `minValue` | `number` | Minimum possible value of the parameter. | | -| `maxValue` | `number` | Maximum possible value of the parameter. | | -| `value` | `number` | Current value of the parameter. Initially set to `defaultValue`. | - -## Methods - -### `setValueAtTime` - -Schedules an instant change to the `value` at given `startTime`. - -> **Caution** -> -> If you need to call this function many times (especially more than 31 times), it is recommended to use the methods described below -> (such as [`linearRampToValueAtTime`](/docs/core/audio-param#linearramptovalueattime) or [`exponentialRampToValueAtTime`](/docs/core/audio-param#exponentialramptovalueattime)), -> as they are more efficient for continuous changes. For more specific use cases, you can schedule multiple value changes using [`setValueCurveAtTime`](/docs/core/audio-param#setvaluecurveattime). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `value` | `number` | A float representing the value the `AudioParam` will be set at given time | -| `startTime` | `number` | The time, in seconds, at which the change in value is going to happen. 
If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties). | - -#### Errors: - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `startTime` is negative number. | - -#### Returns `AudioParam`. - -### `linearRampToValueAtTime` - -Schedules a gradual linear change to the new value. -The change begins at the time designated for the previous event. It follows a linear ramp to the `value`, achieving it by the specified `endTime`. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `value` | `number` | A float representing the value, the `AudioParam` will ramp to by given time. | -| `endTime` | `number` | The time, in seconds, at which the value ramp will end. If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties). | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `endTime` is negative number. | - -#### Returns `AudioParam`. - -### `exponentialRampToValueAtTime` - -Schedules a gradual exponential change to the new value. -The change begins at the time designated for the previous event. It follows an exponential ramp to the `value`, achieving it by the specified `endTime`. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `value` | `number` | A float representing the value the `AudioParam` will ramp to by given time. | -| `endTime` | `number` | The time, in seconds, at which the value ramp will end. 
If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties).| - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `endTime` is negative number. | - -#### Returns `AudioParam`. - -### `setTargetAtTime` - -Schedules a gradual change to the new value at the start time. -This method is useful for decay or release portions of [ADSR envelopes](/docs/effects/gain-node#envelope---adsr). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `target` | `number` | A float representing the value to which the `AudioParam` will start transitioning. | -| `startTime` | `number` | The time, in seconds, at which exponential transition will begin. If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties). | -| `timeConstant` | `number` | A double representing the time-constant value of an exponential approach to the `target`. | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `startTime` is negative number. | -| `RangeError` | `timeConstant` is negative number. | - -#### Returns `AudioParam`. - -### `setValueCurveAtTime` - -Schedules the parameters's value change following a curve defined by given array. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `values` | `Float32Array` | The array of values defining a curve, which change will follow. | -| `startTime` | `number` | The time, in seconds, at which change will begin. 
If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties). | -| `duration` | `number` | A double representing total time over which the change will happen. | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `startTime` is negative number. | - -#### Returns `AudioParam`. - -### `cancelScheduledValues` - -Cancels all scheduled changes after given cancel time. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `cancelTime` | `number` | The time, in seconds, after which all scheduled changes will be cancelled. If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties). | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `cancelTime` is negative number. | - -#### Returns `AudioParam`. - -### `cancelAndHoldAtTime` - -Cancels all scheduled changes after given cancel time, but holds its value at given cancel time until further changes appear. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `cancelTime` | `number` | The time, in seconds, after which all scheduled changes will be cancelled. If it's smaller than [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties), it will be clamped to [`currentTime`](https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context#properties).| - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `RangeError` | `cancelTime` is negative number. | - -#### Returns `AudioParam`. 
- -## Remarks - -All time parameters should be in the same time coordinate system as [`BaseAudioContext.currentTime`](/docs/core/base-audio-context). - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/core/base-audio-context -# Title: base-audio-context - -# BaseAudioContext - -The `BaseAudioContext` interface acts as a supervisor of audio-processing graphs. It provides key processing parameters such as current time, output destination or sample rate. -Additionally, it is responsible for nodes creation and audio-processing graph's lifecycle management. -However, `BaseAudioContext` itself cannot be directly utilized, instead its functionalities must be accessed through one of its derived interfaces: [`AudioContext`](/docs/core/audio-context), [`OfflineAudioContext`](/docs/core/offline-audio-context). - -#### Audio graph - -An audio graph is a structured representation of audio processing elements and their connections within an audio context. -The graph consists of various types of nodes, each performing specific audio operations, connected in a network that defines the audio signal flow. -In general we can distinguish four types of nodes: - -* Source nodes (e.g [`AudioBufferSourceNode`](/docs/sources/audio-buffer-source-node), [`OscillatorNode`](/docs/sources/oscillator-node)) -* Effect nodes (e.g [`GainNode`](/docs/effects/gain-node), [`BiquadFilterNode`](/docs/effects/biquad-filter-node)) -* Analysis nodes (e.g [`AnalyserNode`](/docs/analysis/analyser-node)) -* Destination nodes (e.g [`AudioDestinationNode`](/docs/destinations/audio-destination-node)) - -![](/img/audio-graph.png) - -#### Rendering audio graph - -Audio graph rendering is done in blocks of sample-frames. The number of sample-frames in a block is called render quantum size, and the block itself is called a render quantum. -By default render quantum size value is 128 and it is constant. 
- -The [`AudioContext`](/docs/core/audio-context) rendering thread is driven by a system-level audio callback. -Each call has a system-level audio callback buffer size, which is a varying number of sample-frames that needs to be computed on time before the next system-level audio callback arrives, -but render quantum size does not have to be a divisor of the system-level audio callback buffer size. - -> **Info** -> -> Concept of system-level audio callback does not apply to [`OfflineAudioContext`](/docs/core/offline-audio-context). - -## Properties - -| Name | Type | Description | | -| :----: | :----: | :-------- | :-: | -| `currentTime` | `number` | Double value representing an ever-increasing hardware time in seconds, starting from 0. | | -| `destination` | [`AudioDestinationNode`](/docs/destinations/audio-destination-node) | Final output destination associated with the context. | | -| `sampleRate` | `number` | Float value representing the sample rate (in samples per seconds) used by all nodes in this context. | | -| `state` | [`ContextState`](/docs/core/base-audio-context#contextstate) | Enumerated value represents the current state of the context. | | - -## Methods - -### `createAnalyser` - -Creates [`AnalyserNode`](/docs/analysis/analyser-node). - -#### Returns `AnalyserNode`. - -### `createBiquadFilter` - -Creates [`BiquadFilterNode`](/docs/effects/biquad-filter-node). - -#### Returns `BiquadFilterNode`. - -### `createBuffer` - -Creates [`AudioBuffer`](/docs/sources/audio-buffer). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `numOfChannels` | `number` | An integer representing the number of channels of the buffer. | -| `length` | `number` | An integer representing the length of the buffer in sampleFrames. Two seconds buffer has length equals to `2 * sampleRate`. | -| `sampleRate` | `number` | A float representing the sample rate of the buffer. 
| - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `NotSupportedError` | `numOfChannels` is outside the nominal range \[1, 32]. | -| `NotSupportedError` | `sampleRate` is outside the nominal range \[8000, 96000]. | -| `NotSupportedError` | `length` is less then 1. | - -#### Returns `AudioBuffer`. - -### `createBufferSource` - -Creates [`AudioBufferSourceNode`](/docs/sources/audio-buffer-source-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `options` | `{ pitchCorrection: boolean }` | Boolean that specifies if pitch correction has to be available. | - -#### Returns `AudioBufferSourceNode`. - -### `createBufferQueueSource` - -Creates [`AudioBufferQueueSourceNode`](/docs/sources/audio-buffer-queue-source-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `options` | `{ pitchCorrection: boolean }` | Boolean that specifies if pitch correction has to be available. | - -#### Returns `AudioBufferQueueSourceNode`. - -### `createConstantSource` - -Creates [`ConstantSourceNode`](/docs/sources/constant-source-node). - -#### Returns `ConstantSourceNode`. - -### `createConvolver` - -Creates [`ConvolverNode`](/docs/effects/convolver-node). - -#### Returns `ConvolverNode`. - -### `createDelay` - -Creates [`DelayNode`](/docs/effects/delay-node) - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `maxDelayTime` | `number` | Maximum amount of time to buffer delayed values| - -#### Returns `DelayNode` - -### `createGain` - -Creates [`GainNode`](/docs/effects/gain-node). - -#### Returns `GainNode`. - -### `createIIRFilter` - -Creates [`IIRFilterNode`](/docs/effects/iir-filter-node). - -#### Returns `IIRFilterNode`. - -### `createOscillator` - -Creates [`OscillatorNode`](/docs/sources/oscillator-node). - -#### Returns `OscillatorNode`. - -### `createPeriodicWave` - -Creates [`PeriodicWave`](/docs/effects/periodic-wave). 
This waveform specifies a repeating pattern that an OscillatorNode can use to generate its output sound. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `real` | `Float32Array` | An array of cosine terms. | -| `imag` | `Float32Array` | An array of sine terms. | -| `constraints` | [`PeriodicWaveConstraints`](/docs/core/base-audio-context#periodicwaveconstraints) | An object that specifies if normalization is disabled. If so, periodic wave will have maximum peak value of 1 and minimum peak value of -1.| - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `InvalidAccessError` | `real` and `imag` arrays do not have same length. | - -#### Returns `PeriodicWave`. - -### `createRecorderAdapter` - -Creates [`RecorderAdapterNode`](/docs/sources/recorder-adapter-node). - -#### Returns `RecorderAdapterNode` - -### `createStereoPanner` - -Creates [`StereoPannerNode`](/docs/effects/stereo-panner-node). - -#### Returns `StereoPannerNode`. - -### `createStreamer` - -Creates [`StreamerNode`](/docs/sources/streamer-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `options` | [`StreamerOptions`](/docs/sources/streamer-node#streameroptions) | Streamer options to initialize. | - -#### Returns `StreamerNode`. - -### `createWaveShaper` - -Creates [`WaveShaperNode`](/docs/effects/wave-shaper-node). - -#### Returns `WaveShaperNode`. - -### `createWorkletNode` - -Creates [`WorkletNode`](/docs/worklets/worklet-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `worklet` | `(Array, number) => void` | The worklet to be executed. | -| `bufferLength` | `number` | The size of the buffer that will be passed to the worklet on each call. | -| `inputChannelCount` | `number` | The number of channels that the node expects as input (it will get min(expected, provided)). | -| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. 
See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `Error` | `react-native-worklet` is not found as dependency. | -| `NotSupportedError` | `bufferLength` \< 1. | -| `NotSupportedError` | `inputChannelCount` is not in range \[1, 32]. | - -#### Returns `WorkletNode`. - -### `createWorkletSourceNode` - -Creates [`WorkletSourceNode`](/docs/worklets/worklet-source-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `worklet` | `(Array, number, number, number) => void` | The worklet to be executed. | -| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `Error` | `react-native-worklet` is not found as dependency. | - -#### Returns `WorkletSourceNode`. - -### `createWorkletProcessingNode` - -Creates [`WorkletProcessingNode`](/docs/worklets/worklet-processing-node). - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `worklet` | `(Array, Array, number, number) => void` | The worklet to be executed. | -| `workletRuntime` | `AudioWorkletRuntime` | The kind of runtime to use for the worklet. See [worklet runtimes](/docs/worklets/worklets-introduction#what-kind-of-worklets-are-used-in-react-native-audio-api) for details. | - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `Error` | `react-native-worklet` is not found as dependency. | - -#### Returns `WorkletProcessingNode`. - -### `decodeAudioData` - -Decodes audio data from either a file path or an ArrayBuffer. The optional `sampleRate` parameter lets you resample the decoded audio. -If not provided, the audio will be automatically resampled to match the audio context's `sampleRate`. 
- -**For the list of supported formats visit [this page](/docs/utils/decoding).** - -Parameter -Type -Description - -input -ArrayBuffer -ArrayBuffer with audio data. - -string -Path to remote or local audio file. - -number -Asset module id. - -fetchOptions -[RequestInit](https://github.com/facebook/react-native/blob/ac06f3bdc76a9fd7c65ab899e82bff5cad9b94b6/packages/react-native/src/types/globals.d.ts#L265) -Additional headers parameters when passing url to fetch. - -#### Returns `Promise`. - -Example decoding - -```tsx -const url = ... // url to an audio - -const buffer = await audioContext.decodeAudioData(url); -``` - -### `decodePCMInBase64` - -Decodes base64-encoded PCM audio data. - -| Parameter | Type | Description | -|-----------|------|-------------| -| `base64String` | `string` | Base64-encoded PCM audio data. | -| `inputSampleRate` | `number` | Sample rate of the input PCM data. | -| `inputChannelCount` | `number` | Number of channels in the input PCM data. | -| `isInterleaved` | `boolean` | Whether the PCM data is interleaved. Default is `true`. | - -#### Returns `Promise` - -Example decoding with data in base64 format - -```tsx -const data = ... // data encoded in base64 string -// data is not interleaved (Channel1, Channel1, ..., Channel2, Channel2, ...) -const buffer = await this.audioContext.decodeAudioData(data, 4800, 2, false); -``` - -## Remarks - -#### `currentTime` - -* Timer starts when context is created, stops when context is suspended. - -### `ContextState` - -Details - -**Acceptable values:** - -* `suspended` - -The audio context has been suspended (with one of [`suspend`](/docs/core/audio-context#suspend) or [`OfflineAudioContext.suspend`](/docs/core/offline-audio-context#suspend)). - -* `running` - -The audio context is running normally. - -* `closed` - -The audio context has been closed (with [`close`](/docs/core/audio-context#close) method). 
- - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/core/offline-audio-context -# Title: offline-audio-context - -# OfflineAudioContext - -The `OfflineAudioContext` interface inherits from [`BaseAudioContext`](/docs/core/base-audio-context). -In contrast with a standard [`AudioContext`](/docs/core/audio-context), it doesn't render audio to the device hardware. -Instead, it processes the audio as quickly as possible and outputs the result to an [`AudioBuffer`](/docs/sources/audio-buffer). - -## Constructor - -`OfflineAudioContext(options: OfflineAudioContextOptions)` - -```typescript -interface OfflineAudioContextOptions { - numberOfChannels: number; - length: number; // The length of the rendered AudioBuffer, in sample-frames - sampleRate: number; -} -``` - -## Properties - -`OfflineAudioContext` does not define any additional properties. -It inherits all properties from [`BaseAudioContext`](/docs/core/base-audio-context#properties). - -## Methods - -It inherits all methods from [`BaseAudioContext`](/docs/core/base-audio-context#methods). - -### `suspend` - -Schedules a suspension of the time progression in audio context at the specified time. - -| Parameter | Type | Description | -| :---: | :---: | :---- | -| `suspendTime` | `number` | A floating-point number specifying the suspend time, in seconds. | - -#### Returns `Promise`. - -### `resume` - -Resume time progression in audio context when it has been suspended. - -#### Returns `Promise` - -### `startRendering` - -Starts rendering the audio, taking into account the current connections and the current scheduled changes. - -#### Returns `Promise`. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/destinations/audio-destination-node -# Title: audio-destination-node - -# AudioDestinationNode - -The `AudioDestinationNode` interface represents the final destination of an audio graph, where all processed audio is ultimately directed. 
- -In most cases, this means the sound is sent to the system’s default output device, such as speakers or headphones. -When used with an [`OfflineAudioContext`](/docs/core/offline-audio-context) the rendered audio isn’t played back immediately—instead, -it is stored in an [`AudioBuffer`](/docs/sources/audio-buffer). - -Each `AudioContext` has exactly one AudioDestinationNode, which can be accessed through its -[`AudioContext.destination`](/docs/core/base-audio-context/#properties) property. - -#### [`AudioNode`](/docs/core/audio-node#read-only-properties) properties - -## Properties - -`AudioDestinationNode` does not define any additional properties. -It inherits all properties from [`AudioNode`](/docs/core/audio-node), listed above. - -## Methods - -`AudioDestinationNode` does not define any additional methods. -It inherits all methods from [`AudioNode`](/docs/core/audio-node). - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/biquad-filter-node -# Title: biquad-filter-node - -# BiquadFilterNode - -The `BiquadFilterNode` interface represents a low-order filter. It is an [`AudioNode`](/docs/core/audio-node) used for tone controls, graphic equalizers, and other audio effects. -Multiple `BiquadFilterNode` instances can be combined to create more complex filtering chains. 
- -#### [`AudioNode`](/docs/core/audio-node#read-only-properties) properties - -## Constructor - -```tsx -constructor(context: BaseAudioContext, options?: BiquadFilterOptions) -``` - -### `BiquadFilterOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | | -| :---: | :---: | :----: | :---- | -| `Q` | `number` | 1 | Initial value for [`Q`](/docs/effects/biquad-filter-node#properties) | -| `detune` | `number` | 0 | Initial value for [`detune`](/docs/effects/biquad-filter-node#properties) | -| `frequency` | `number` | 350 | Initial value for [`frequency`](/docs/effects/biquad-filter-node#properties) | -| `gain` | `number` | 0 | Initial value for [`gain`](/docs/effects/biquad-filter-node#properties) | -| `type` | `BiquadFilterType` | `lowpass` | Initial value for [`type`](/docs/effects/biquad-filter-node#properties) | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createBiquadFilter()`](/docs/core/base-audio-context#createbiquadfilter) that creates node with default values. - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Rate | Description | -| :--: | :--: | :----------: | :-- | -| `frequency` | [`AudioParam`](/docs/core/audio-param) | [`k-rate`](/docs/core/audio-param#a-rate-vs-k-rate) | The filter’s cutoff or center frequency in hertz (Hz). | -| `detune` | [`AudioParam`](/docs/core/audio-param) | [`k-rate`](/docs/core/audio-param#a-rate-vs-k-rate) | Amount by which the frequency is detuned in cents. | -| `Q` | [`AudioParam`](/docs/core/audio-param) | [`k-rate`](/docs/core/audio-param#a-rate-vs-k-rate) | The filter’s Q factor (quality factor). | -| `gain` | [`AudioParam`](/docs/core/audio-param) | [`k-rate`](/docs/core/audio-param#a-rate-vs-k-rate) | Gain applied by specific filter types, in decibels (dB). 
| -| `type` | [`BiquadFilterType`](#biquadfiltertype-enumeration-description) | — | Defines the kind of filtering algorithm the node applies (e.g. `"lowpass"`, `"highpass"`). | - -#### BiquadFilterType enumeration description - -Note: The detune parameter behaves the same way for all filter types, so it is not repeated below. -| `type` | Description | `frequency` | `Q` | `gain` | -|:------:|:-----------:|:-----------:|:---:|:------:| -| `lowpass` | Second-order resonant lowpass filter with 12dB/octave rolloff. Frequencies below the cutoff pass through; higher frequencies are attenuated. | The cutoff frequency. | Determines how peaked the frequency is around the cutoff. Higher values result in a sharper peak. | Not used | -| `highpass` | Second-order resonant highpass filter with 12dB/octave rolloff. Frequencies above the cutoff pass through; lower frequencies are attenuated. | The cutoff frequency. | Determines how peaked the frequency is around the cutoff. Higher values result in a sharper peak. | Not used | -| `bandpass` | Second-order bandpass filter. Frequencies within a given range pass through; others are attenuated. | The center of the frequency band. | Controls the bandwidth. Higher values result in a narrower band. | Not used | -| `lowshelf` | Second-order lowshelf filter. Frequencies below the cutoff are boosted or attenuated; others remain unchanged. | The upper limit of the frequencies where the boost (or attenuation) is applied. | Not used | The boost (in dB) to be applied. Negative values attenuate the frequencies.| -| `highshelf` | Second-order highshelf filter. Frequencies above the cutoff are boosted or attenuated; others remain unchanged. | The lower limit of the frequencies where the boost (or attenuation) is applied. | Not used | The boost (in dB) to be applied. Negative values attenuate the frequencies. | -| `peaking` | Frequencies around a center frequency are boosted or attenuated; others remain unchanged. 
| The center of the frequency range where the boost (or an attenuation) is applied. | Controls the bandwidth. Higher values result in a narrower band. | The boost (in dB) to be applied. Negative values attenuate the frequencies. | -| `notch` | Notch (band-stop) filter. Opposite of a bandpass filter: frequencies around the center are attenuated; others remain unchanged. | The center of the frequency range where the notch is applied. | Controls the bandwidth. Higher values result in a narrower band. | Not used | -| `allpass` | Second-order allpass filter. All frequencies pass through, but changes the phase relationship between the various frequencies. | The frequency where the center of the phase transition occurs (maximum group delay). | Controls how sharp the phase transition is at the center frequency. Higher values result in a sharper transition and a larger group delay. | Not used | - -## Methods - -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -### `getFrequencyResponse` - -| Parameter | Type | Description | -| :--------: | :--: | :---------- | -| `frequencyArray` | `Float32Array` | Array of frequencies (in Hz), which you want to filter. | -| `magResponseOutput` | `Float32Array` | Output array to store the computed linear magnitude values for each frequency. For frequencies outside the range \[0, $\frac$], the corresponding results are NaN. | -| `phaseResponseOutput` | `Float32Array` | Output array to store the computed phase response values (in radians) for each frequency. For frequencies outside the range \[0, $\frac$], the corresponding results are NaN. | - -#### Returns `undefined`. - -## Remarks - -#### `frequency` - -* Range: \[10, $\frac$]. - -#### `Q` - -* Range: - * For `lowpass` and `highpass` is \[-Q, Q], where Q is the largest value for which $10^$ does not overflow the single-precision floating-point representation. - Numerically: Q ≈ 770.63678. 
- * For `bandpass`, `notch`, `allpass`, and `peaking`: Q is related to the filter’s bandwidth and should be positive. - * Not used for `lowshelf` and `highshelf`. - -#### `gain` - -* Range: \[-40, 40]. -* Positive values correspond to amplification; negative to attenuation. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/convolver-node -# Title: convolver-node - -# ConvolverNode - -The `ConvolverNode` interface represents a linear convolution effect, that can be applied to a signal given an impulse response. -This is the easiest way to achieve `echo` or [`reverb`](https://en.wikipedia.org/wiki/Reverb_effect) effects. - -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -> **Info** -> -> Convolver is a node with tail-time, which means, that it continues to output non-silent audio with zero input for the length of the buffer. - -## Constructor - -```tsx -constructor(context: BaseAudioContext, options?: ConvolverOptions) -``` - -### `ConvolverOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | | -| :---: | :---: | :----: | :---- | -| `buffer` | `number` | | Initial value for [`buffer`](/docs/effects/convolver-node#properties). | -| `normalize` | `boolean` | true | Initial value for [`normalize`](/docs/effects/convolver-node#properties). | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createConvolver()`](/docs/core/base-audio-context#createconvolver) - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | -| :----: | :----: | :-------- | -| `buffer` | [`AudioBuffer`](/docs/sources/audio-buffer) | Associated AudioBuffer. | -| `normalize` | `boolean` | Whether the impulse response from the buffer will be scaled by an equal-power normalization when the buffer attribute is set. 
| - -> **Caution** -> -> Linear convolution is a heavy computational process, so if your audio has some weird artefacts that should not be there, try to decrease the duration of impulse response buffer. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/delay-node -# Title: delay-node - -# DelayNode - -The `DelayNode` interface represents the latency of the audio signal by given time. It is an [`AudioNode`](/docs/core/audio-node) that applies time shift to incoming signal f.e. -if `delayTime` value is 0.5, it means that audio will be played after 0.5 seconds. - -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -> **Info** -> -> Delay is a node with tail-time, which means, that it continues to output non-silent audio with zero input for the duration of `delayTime`. - -## Constructor - -[`BaseAudioContext.createDelay(maxDelayTime?: number)`](/docs/core/base-audio-context#createdelay) - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | -| :----: | :----: | :-------- | -| `delayTime`| [`AudioParam`](/docs/core/audio-param) | [`k-rate`](/docs/core/audio-param#a-rate-vs-k-rate) `AudioParam` representing value of time shift to apply. | - -> **Warning** -> -> In web audio api specs `delayTime` is an `a-rate` param. - -## Methods - -`DelayNode` does not define any additional methods. -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -## Remarks - -#### `maxDelayTime` - -* Default value is 1.0. -* Nominal range is 0 - 180. - -#### `delayTime` - -* Default value is 0. -* Nominal range is 0 - `maxDelayTime`. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/gain-node -# Title: gain-node - -# GainNode - -The `GainNode` interface represents a change in volume (amplitude) of the audio signal. 
It is an [`AudioNode`](/docs/core/audio-node) with a single `gain` [`AudioParam`](/docs/core/audio-param) that multiplies every sample passing through it. - -> **Tip** -> -> Direct, immediate gain changes often cause audible clicks. Use the scheduling methods of [`AudioParam`](/docs/core/audio-param) (e.g. `linearRampToValueAtTime`, `exponentialRampToValueAtTime`) to smoothly interpolate volume transitions. - -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -## Constructor - -```tsx -constructor(context: BaseAudioContext, options?: GainOptions) -``` - -### `GainOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | | -| :---: | :---: | :----: | :---- | -| `gain` | `number` | `1.0` | Initial value for [`gain`](/docs/effects/gain-node#properties) | - -You can also create a `GainNode` via the [`BaseAudioContext.createGain()`](/docs/core/base-audio-context#creategain) factory method, which uses default values. - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | | -| :----: | :----: | :-------- | :-: | -| `gain` | [`AudioParam`](/docs/core/audio-param) | [`a-rate`](/docs/core/audio-param#a-rate-vs-k-rate) `AudioParam` representing the gain value to apply. | | - -## Methods - -`GainNode` does not define any additional methods. -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). 
- -## Usage - -A common use case is controlling the master volume of an audio graph: - -```tsx -const audioContext = new AudioContext(); -const gainNode = audioContext.createGain(); - -// Set volume to 50% -gainNode.gain.setValueAtTime(0.5, audioContext.currentTime); - -// Connect source → gain → output -source.connect(gainNode); -gainNode.connect(audioContext.destination); -``` - -To fade in a sound over 2 seconds: - -```tsx -gainNode.gain.setValueAtTime(0, audioContext.currentTime); -gainNode.gain.linearRampToValueAtTime(1, audioContext.currentTime + 2); -``` - -## Remarks - -#### `gain` - -* Nominal range is -∞ to ∞. -* Values greater than `1.0` amplify the signal; values between `0` and `1.0` attenuate it. -* A value of `0` silences the signal. Negative values invert the signal phase. - -## Advanced usage — Envelope (ADSR) - -`GainNode` is the key building block for implementing sound envelopes. For a practical, step-by-step walkthrough of ADSR envelopes and how to apply them in a real app, see the [Making a piano keyboard](/docs/guides/making-a-piano-keyboard#envelopes-) guide. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/iir-filter-node -# Title: iir-filter-node - -# IIRFilterNode - -The `IIRFilterNode` interface represents a general infinite impulse response (IIR) filter. -It is an [`AudioNode`](/docs/core/audio-node) used for tone controls, graphic equalizers, and other audio effects. -`IIRFilterNode` lets the parameters of the filter response be specified, so that it can be tuned as needed. - -In general, it is recommended to use [`BiquadFilterNode`](/docs/effects/biquad-filter-node) for implementing higher-order filters, -as it is less sensitive to numeric issues and its parameters can be automated. You can create all even-order IIR filters with `BiquadFilterNode`, -but if odd-ordered filters are needed or automation is not needed, then `IIRFilterNode` may be appropriate. 
- -## Constructor - -[`BaseAudioContext.createIIRFilter(options: IIRFilterNodeOptions)`](/docs/core/base-audio-context#createiirfilter) - -```jsx -interface IIRFilterNodeOptions { - feedforward: number[]; // array of floating-point values specifying the feedforward (numerator) coefficients - feedback: number[]; // array of floating-point values specifying the feedback (denominator) coefficients -} -``` - -#### Errors - -| Error type | Description | -| :---: | :---- | -| `NotSupportedError` | One or both of the input arrays exceeds 20 members. | -| `InvalidStateError` | All of the feedforward coefficients are 0, or the first feedback coefficient is 0. | - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -## Methods - -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -### `getFrequencyResponse` - -| Parameter | Type | Description | -| :--------: | :--: | :---------- | -| `frequencyArray` | `Float32Array` | Array of frequencies (in Hz), which you want to filter. | -| `magResponseOutput` | `Float32Array` | Output array to store the computed linear magnitude values for each frequency. For frequencies outside the range \[0, $\frac$], the corresponding results are NaN. | -| `phaseResponseOutput` | `Float32Array` | Output array to store the computed phase response values (in radians) for each frequency. For frequencies outside the range \[0, $\frac$], the corresponding results are NaN. | - -#### Returns `undefined`. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/periodic-wave -# Title: periodic-wave - -# PeriodicWave - -The `PeriodicWave` interface defines a periodic waveform that can be used to shape the output of an OscillatorNode. 
- -## Constructor - -```tsx -constructor(context: BaseAudioContext, options: PeriodicWaveOptions) -``` - -### `PeriodicWaveOptions` - -| Parameter | Type | Default | Description | -| :---: | :---: | :----: | :---- | -| `real` | `Float32Array` | - | [Cosine terms](/docs/core/base-audio-context#createperiodicwave) | -| `imag` | `Float32Array` | - | [Sine terms](/docs/core/base-audio-context#createperiodicwave) | -| `disableNormalization` | `boolean` | false | Whether the periodic wave is [normalized](/docs/core/base-audio-context#createperiodicwave) or not. | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createPeriodicWave(real, imag, constraints?: PeriodicWaveConstraints)`](/docs/core/base-audio-context#createperiodicwave) - -## Properties - -None. `PeriodicWave` has no own or inherited properties. - -## Methods - -None. `PeriodicWave` has no own or inherited methods. - -## Remarks - -#### `real` and `imag` - -* if only one is specified, the other one is treated as array of 0s of the same length -* if neither is given values are equivalent to the sine wave -* if both given, they have to have the same length -* to see how values corresponds to the output wave [see](https://webaudio.github.io/web-audio-api/#waveform-generation) for more information - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/stereo-panner-node -# Title: stereo-panner-node - -# StereoPannerNode - -The `StereoPannerNode` interface represents the change in ratio between two output channels (f. e. left and right speaker). 
- -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -## Constructor - -```tsx -constructor(context: BaseAudioContext, stereoPannerOptions?: StereoPannerOptions) -``` - -### `StereoPannerOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | Description | -| :---: | :---: | :----: | :---- | -| `pan` | `number` | - | Number representing pan value | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createStereoPanner()`](/docs/core/base-audio-context#createstereopanner) - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | -| :--: | :--: | :---------- | -| `pan` | [`AudioParam`](/docs/core/audio-param) | [`a-rate`](/docs/core/audio-param#a-rate-vs-k-rate) `AudioParam` representing how the audio signal is distributed between the left and right channels. | - -## Methods - -`StereoPannerNode` does not define any additional methods. -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -## Remarks - -#### `pan` - -* Default value is 0 -* Nominal range is -1 (only left channel) to 1 (only right channel). - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/effects/wave-shaper-node -# Title: wave-shaper-node - -# WaveShaperNode - -The `WaveShaperNode` interface represents non-linear signal distortion effects. -Non-linear distortion is commonly used for both subtle non-linear warming, or more obvious distortion effects. 
- -#### [`AudioNode`](/docs/core/audio-node#properties) properties - -## Constructor - -```tsx -constructor(context: BaseAudioContext, waveShaperOptions?: WaveShaperOptions) -``` - -### `WaveShaperOptions` - -Inherits all properties from [`AudioNodeOptions`](/docs/core/audio-node#audionodeoptions) - -| Parameter | Type | Default | Description | -| :---: | :---: | :----: | :---- | -| `curve` | `Float32Array` | - | Array representing curve values | -| `oversample` | [`OverSampleType`](/docs/effects/wave-shaper-node#oversampletype) | - | Value representing oversample property | - -Or by using `BaseAudioContext` factory method: -[`BaseAudioContext.createStereoPanner()`](/docs/core/base-audio-context#createwaveshaper) - -## Properties - -It inherits all properties from [`AudioNode`](/docs/core/audio-node#properties). - -| Name | Type | Description | -| :--: | :--: | :---------- | -| `curve` | `Float32Array \| null` | The shaping curve used for waveshaping effect. | -| `oversample` | [`OverSampleType`](/docs/effects/wave-shaper-node#oversampletype) | Specifies what type of oversampling should be used when applying shaping curve. | - -## Methods - -`WaveShaperNode` does not define any additional methods. -It inherits all methods from [`AudioNode`](/docs/core/audio-node#methods). - -## Remarks - -#### `curve` - -* Default value is null -* Contains at least two values. -* Subsequent modifications of curve have no effects. To change the curve, assign a new Float32Array object to this property. - -#### `oversample` - -* Default value `none` -* Value of `2x` or `4x` can increases quality of the effect, but in some cases it is better not to use oversampling for very accurate shaping curve. 
- -### `OverSampleType` - -Type definitions - -```typescript -// Do not oversample | Oversample two times | Oversample four times -type OverSampleType = 'none' | '2x' | '4x'; -``` - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/fundamentals/best-practices -# Title: best-practices - -# Best Practices - -When working with audio in a web or mobile application, following best practices ensures optimal performance, -user experience, and maintainability. Here are some key best practices to consider when using the React Native Audio API: - -## [**AudioContext**](/docs/core/audio-context) Management - -* **Single Audio Context**: Create one instance of `AudioContext` in order to easily and efficiently manage the audio layer's state in your application. - Creating many instances could lead to undefined behavior. Same of them could still be in [`running`](/docs/core/base-audio-context#contextstate) state while others could be - [`suspended`](/docs/core/base-audio-context#contextstate) or [`closed`](/docs/core/base-audio-context#contextstate), if you do not manage them by yourself. - -* **Clean up**: Always close the `AudioContext` using the [`close()`](/docs/core/audio-context#close) method when it is no longer needed. - This releases system audio resources and prevents memory leaks. - -* **Suspend when not in use**: Suspend the `AudioContext` when audio is not needed to save system resources and battery life, especially on mobile devices. - Running `AudioContext` is still playing silence even if there is no playing source node connected to the [`destination`](/docs/core/base-audio-context#properties). - Additionally, on iOS devices, the state of the `AudioContext` is directly related with state of the lock screen. If running `AudioContext` exists, it is impossible to set lock screen state to [`state_paused`](/docs/system/audio-manager#lockscreeninfo). 
- -## React hooks vs React Native Audio API - -* **Create singleton class to manage audio layer**: Instead of storing `AudioContext` or nodes directly in your React components using `useState` or `useRef`, - consider creating a singleton class that encapsulates the audio layer logic using React Native Audio API. - This class can manage the lifecycle of the `AudioContext`, handle audio nodes, and provide methods for playing, pausing, and stopping audio. - This approach promotes separation of concerns and makes it easier to manage audio state across your application. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/fundamentals/getting-started -# Title: getting-started - -# Getting started - -The goal of *Fundamentals* is to guide you through the setup process of the Audio API, as well as to show the basic concepts behind audio programming using a web audio framework, giving you the confidence to explore more advanced use cases on your own. This section is packed with interactive examples, code snippets, and explanations. Are you ready? Let's make some noise! - -## Installation - -It takes only a few steps to add Audio API to your project: - -### Step 1: Install the package - -Install the `react-native-audio-api` package from npm: - -```sh -npx expo install react-native-audio-api -``` - -```sh -npm install react-native-audio-api -``` - -```sh -yarn add react-native-audio-api -``` - -### Step 2: Add Audio API expo plugin (optional) - -Add `react-native-audio-api` expo plugin to your `app.json` or `app.config.js`. 
- -app.json - -```javascript -{ - "plugins": [ - [ - "react-native-audio-api", - { - "iosBackgroundMode": true, - "iosMicrophonePermission": "This app requires access to the microphone to record audio.", - "androidPermissions" : [ - "android.permission.MODIFY_AUDIO_SETTINGS", - "android.permission.FOREGROUND_SERVICE", - "android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" - ], - "androidForegroundService": true, - "androidFSTypes": [ - "mediaPlayback" - ] - } - ] - ] -} -``` - -app.config.js - -```javascript -export default { - ... - "plugins": [ - [ - "react-native-audio-api", - { - "iosBackgroundMode": true, - "iosMicrophonePermission": "This app requires access to the microphone to record audio.", - "androidPermissions" : [ - "android.permission.MODIFY_AUDIO_SETTINGS", - "android.permission.FOREGROUND_SERVICE", - "android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" - ], - "androidForegroundService": true, - "androidFSTypes": [ - "mediaPlayback" - ] - } - ] - ] -}; -``` - -#### Special permissions - -If you plan to use [`AudioRecorder`](/docs/inputs/audio-recorder), the `iosMicrophonePermission` entry and `android.permission.RECORD_AUDIO` in the `androidPermissions` section are **MANDATORY**. - -> **Info** -> -> If your app is not managed by expo, see the [non-expo-permissions page](/docs/other/non-expo-permissions) for how to handle permissions. - -Read more about the plugin [here](/docs/other/audio-api-plugin)! - -### Step 3: Install system-wide bash (only Windows OS) - -There are many ways to do that, e.g. using Git Bash. To make sure it works, test whether any Unix command runs. - -```bash -bash -c 'echo Hello World!' -``` - -### Possible additional dependencies - -If you plan to use any of [`WorkletNode`](/docs/worklets/worklet-node), [`WorkletSourceNode`](/docs/worklets/worklet-source-node), [`WorkletProcessingNode`](/docs/worklets/worklet-processing-node), it is required to have -the `react-native-worklets` library set up with version 0.6.0 or higher. 
See the [worklets getting-started page](https://docs.swmansion.com/react-native-worklets/docs/) for info on how to do it. - -> **Info** -> -> If you are not planning to use any of the mentioned nodes, the `react-native-worklets` dependency is **OPTIONAL** and your app will build successfully without it. - -### Usage with expo - -`react-native-audio-api` contains native custom code and isn't part of the Expo Go application. In order to be available in Expo managed builds, you have to use an Expo development build. The simplest way to start local Expo dev builds is to use: - -```sh -npx expo run:ios -``` - -```sh -npx expo run:android -``` - -To learn more about expo development builds, please check out [Development Builds Documentation](https://docs.expo.dev/develop/development-builds/introduction/). - -#### Android - -No further steps are necessary. - -#### iOS - -While developing for iOS, make sure to install [pods](https://cocoapods.org) first before running the app: - -```sh -cd ios && pod install && cd .. -``` - -#### Web - -No further steps are necessary. - -> **Caution** -> -> `react-native-audio-api` on the web exposes the browser's built-in Web Audio API, but for compatibility between platforms, it limits the available interfaces to APIs that are implemented on iOS and Android. - -### Clear Metro bundler cache (recommended) - -```sh -npx expo start -c -``` - -```sh -npm start -- --reset-cache -``` - -```sh -yarn start --reset-cache -``` - -## What's next? - -In [the next section](/docs/guides/lets-make-some-noise), we will learn how to prepare the Audio API and play some sound! - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/fundamentals/introduction -# Title: introduction - -# Introduction - -React Native Audio API is an imperative, high-level API for processing and synthesizing audio in React Native Applications. 
React Native Audio API follows the [Web Audio Specification](https://www.w3.org/TR/webaudio-1.1/) making it easier to write audio-heavy applications for iOS, Android and Web with just one codebase. - -## Highlights - -* Supports react-native, react-native-web or any web react based project -* API strictly follows the Web Audio API standard -* Blazingly fast, all of the Audio API core is written in C++ to deliver the best performance possible -* Truly native, we use the most up-to-date native APIs such as AVFoundation, CoreAudio or Oboe -* Modular routing architecture to fit simple (and complex) use-cases -* Sample-accurate scheduled sound playback with low-latency for musical applications requiring the highest degree of rhythmic precision. -* Efficient real-time time-domain and frequency-domain analysis / visualization -* Efficient biquad filters for the most common filtering methods. -* Support for computational audio synthesis - -## Motivation - -By aligning with the Web Audio specification, we're creating a single API that works seamlessly across native iOS, Android, browsers, and even standalone desktop applications. The React Native ecosystem currently lacks a high-performance API for creating audio, adding effects, or controlling basic parameters like volume for each audio source separately - and we're here to bridge that gap! - -## Alternatives - -### Expo Audio - -[Expo Audio](https://docs.expo.dev/versions/latest/sdk/audio/) might be a better fit for you, if you are looking for simple playback functionality, as its simple and well-documented API makes it easy to use. - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/guides/create-your-own-effect -# Title: create-your-own-effect - -# Create your own effect - -In this section, we will create our own [`pure C++ turbo-module`](https://reactnative.dev/docs/the-new-architecture/pure-cxx-modules) and use it to create a custom processing node that can change the sound however you want. 
- -### Prerequisites - -We highly encourage you to get familiar with [this guide](https://reactnative.dev/docs/the-new-architecture/pure-cxx-modules), since we will be using many similar concepts that are explained here. - -## Generate files - -We prepared a script that generates all of the boilerplate code for you. -The only parts you will need to take care of are: - -* customizing the processor to your tasks -* configuring [`codegen`](https://reactnative.dev/docs/the-new-architecture/what-is-codegen) with your project -* writing native specific code to compile those files - -```bash -npx rn-audioapi-custom-node-generator create -o # path where you want files to be generated, usually same level as android/ and ios/ -``` - -## Analyzing generated files - -You should see two directories: - -* `shared/` - it contains c++ files (source code for the custom effect and the JSI layer - Host Objects, needed to communicate with JavaScript) -* `specs/` - defines the typescript interface that will invoke c++ code in JavaScript - -> **Caution** -> -> The name of the file in `specs/` has to start with `Native` to be seen by codegen. - -The most important file is `MyProcessorNode.cpp`, it contains the main processing part that directly manipulates raw data. - -In this guide, we will edit the files in order to achieve [`GainNode`](/docs/effects/gain-node) functionality. -For the sake of simplicity, we will use the value as a raw `double` type, not wrapped in [`AudioParam`](/docs/core/audio-param). 
- -MyProcessorNode.h - -```cpp -#pragma once -#include <memory> - -namespace audioapi { -class AudioBuffer; - -class MyProcessorNode : public AudioNode { -public: - explicit MyProcessorNode(const std::shared_ptr<BaseAudioContext> &context); - -protected: - std::shared_ptr<AudioBuffer> - processNode(const std::shared_ptr<AudioBuffer> &buffer, - int framesToProcess) override; - -// highlight-start -private: - double gain; // value responsible for gain value -// highlight-end -}; -} // namespace audioapi -``` - -MyProcessorNode.cpp - -```cpp -#include "MyProcessorNode.h" -#include <atomic> -#include <memory> - -namespace audioapi { - MyProcessorNode::MyProcessorNode(const std::shared_ptr<BaseAudioContext> &context) - //highlight-next-line - : AudioNode(context), gain(0.5) { - isInitialized_.store(true, std::memory_order_release); - } - - std::shared_ptr<AudioBuffer> MyProcessorNode::processNode(const std::shared_ptr<AudioBuffer> &buffer, - int framesToProcess) { - // highlight-start - for (int channel = 0; channel < buffer->getNumberOfChannels(); ++channel) { - auto *audioArray = buffer->getChannel(channel); - for (int i = 0; i < framesToProcess; ++i) { - // Apply gain to each sample in the audio array - (*audioArray)[i] *= gain; - } - } - return buffer; - // highlight-end - } -} // namespace audioapi -``` - -MyProcessorNodeHostObject.h - -```cpp -#pragma once - -#include "MyProcessorNode.h" -#include "AudioNodeHostObject.h" - -#include <jsi/jsi.h> -#include <memory> - -namespace audioapi { -using namespace facebook; - -class MyProcessorNodeHostObject : public AudioNodeHostObject { -public: - explicit MyProcessorNodeHostObject( - const std::shared_ptr<MyProcessorNode> &node) - : AudioNodeHostObject(node) { - // highlight-start - addGetters(JSI_EXPORT_PROPERTY_GETTER(MyProcessorNodeHostObject, getter)); - addSetters(JSI_EXPORT_PROPERTY_SETTER(MyProcessorNodeHostObject, setter)); - // highlight-end - } - - // highlight-start - JSI_PROPERTY_GETTER(getter) { - auto processorNode = std::static_pointer_cast<MyProcessorNode>(node_); - return {processorNode->someGetter()}; - } - // highlight-end - - // highlight-start - JSI_PROPERTY_SETTER(setter) { - auto processorNode = 
std::static_pointer_cast<MyProcessorNode>(node_); - processorNode->someSetter(value.getNumber()); - } - // highlight-end -}; -} // namespace audioapi -``` - -## Codegen - -Onboarding codegen doesn't require anything special in regard to the basic [react-native tutorial](https://reactnative.dev/docs/the-new-architecture/pure-cxx-modules#2-configure-codegen) - -## Native files - -### iOS - -When it comes to iOS, there is also nothing more than following the [react-native tutorial](https://reactnative.dev/docs/the-new-architecture/pure-cxx-modules#ios) - -### Android - -The Android case is much different: because of the way Android is compiled, we need to compile our library with the whole turbo-module. -Firstly, follow [the guide](https://reactnative.dev/docs/the-new-architecture/pure-cxx-modules#android), but replace `CMakeLists.txt` with this content: - -```cmake -cmake_minimum_required(VERSION 3.13) - -project(appmodules) - -set(ROOT ${CMAKE_SOURCE_DIR}/../../../../..) -set(AUDIO_API_DIR ${ROOT}/node_modules/react-native-audio-api) - -include(${REACT_ANDROID_DIR}/cmake-utils/ReactNative-application.cmake) - -target_sources(${CMAKE_PROJECT_NAME} PRIVATE - ${ROOT}/shared/NativeAudioProcessingModule.cpp - ${ROOT}/shared/MyProcessorNode.cpp - ${ROOT}/shared/MyProcessorNodeHostObject.cpp -) - -target_include_directories(${CMAKE_PROJECT_NAME} PUBLIC - ${ROOT}/shared - ${AUDIO_API_DIR}/common/cpp -) - -add_library(react-native-audio-api SHARED IMPORTED) -string(TOLOWER ${CMAKE_BUILD_TYPE} BUILD_TYPE_LOWER) -# we need to import the built library from the android directory -set_target_properties(react-native-audio-api PROPERTIES IMPORTED_LOCATION - ${AUDIO_API_DIR}/android/build/intermediates/merged_native_libs/${BUILD_TYPE_LOWER}/merge${CMAKE_BUILD_TYPE}NativeLibs/out/lib/${CMAKE_ANDROID_ARCH_ABI}/libreact-native-audio-api.so -) -target_link_libraries(${CMAKE_PROJECT_NAME} react-native-audio-api android log) -``` - -The last part required of you is to add the following lines to the `build.gradle` file 
located in the `android/app` directory. - -```groovy -evaluationDependsOn(":react-native-audio-api") - -afterEvaluate { - tasks.getByName("buildCMakeDebug").dependsOn(findProject(":react-native-audio-api").tasks.getByName("mergeDebugNativeLibs")) - tasks.getByName("buildCMakeRelWithDebInfo").dependsOn(findProject(":react-native-audio-api").tasks.getByName("mergeReleaseNativeLibs")) -} -``` - -Since `CMakeLists.txt` depends on `libreact-native-audio-api.so`, we need to make sure that the app build is invoked only after the library exists. - -## Final touches - -The last part is to finally onboard your custom module to your app by creating a TypeScript interface that maps the C++ layer. - -```typescript -// types.ts -import { AudioNode, BaseAudioContext } from "react-native-audio-api"; -import { IAudioNode, IBaseAudioContext } from "react-native-audio-api/lib/typescript/interfaces"; - -export interface IMyProcessorNode extends IAudioNode { - gain: number; -} - -export class MyProcessorNode extends AudioNode { - constructor(context: BaseAudioContext, node: IMyProcessorNode) { - super(context, node); - } - - public set gain(value: number) { - (this.node as IMyProcessorNode).gain = value; - } - - public get gain(): number { - return (this.node as IMyProcessorNode).gain; - } -} - -declare global { - var createCustomProcessorNode: (context: IBaseAudioContext) => IMyProcessorNode; -} -``` - -## Example - -```tsx -import { - AudioContext, - OscillatorNode, -} from 'react-native-audio-api'; -import { MyProcessorNode } from './types'; - -function App() { - const audioContext = new AudioContext(); - const oscillator = audioContext.createOscillator(); - // constructor is put in global scope - const processor = new MyProcessorNode(audioContext, global.createCustomProcessorNode(audioContext.context)); - oscillator.connect(processor); - processor.connect(audioContext.destination); - oscillator.start(audioContext.currentTime); -} -``` - -**Check out the fully working [demo 
app](https://github.com/software-mansion-labs/custom-processor-node-example)** - -## What's next? - -I’m not sure, but give yourself a pat on the back – you’ve earned it! More guides are on the way, so stay tuned! 🎼 - - ---- -# URL: https://docs.swmansion.com/react-native-audio-api/docs/guides/lets-make-some-noise -# Title: lets-make-some-noise - -# Let's make some noise! - -In this section, we will guide you through the basic concepts of Audio API. We are going to use core audio components such as [`AudioContext`](/docs/core/audio-context) and [`AudioBufferSourceNode`](/docs/sources/audio-buffer-source-node) to simply play sound from a file, which will help you develop a basic understanding of the library. - -## Using audio context - -Let's start by bootstrapping a simple application with a play button and creating our first instance of `AudioContext` object. - -```jsx -import React from 'react'; -import { View, Button } from 'react-native'; -// highlight-next-line -import { AudioContext } from 'react-native-audio-api'; - -export default function App() { - const handlePlay = async () => { - // highlight-next-line - const audioContext = new AudioContext(); - }; - - return ( - -