How to get the microphone volume using AudioWorklet

2022-09-23 javascript web-audio-api

I'm interested in continuously reading the microphone volume in JavaScript. Many existing solutions on StackOverflow (see here, here and here) use BaseAudioContext.createScriptProcessor(), which has been deprecated since 2014.

I want to use future-proof code in my project, so could someone share a modern, minimal example of how to read the microphone volume using the new AudioWorkletNode?


Solution

Let's go over a few things you need to know:

  • All of these changes are about avoiding latency: the audio processing gets its own thread and runs on the audio rendering thread (the AudioWorkletGlobalScope).
  • This new implementation consists of two parts: an AudioWorkletProcessor and an AudioWorkletNode.
  • The AudioWorkletNode needs at least two things: an AudioContext object and the processor name as a string. The processor definition is loaded and registered through the addModule() call of the new AudioWorklet object.
  • The Worklet API, including AudioWorklet, is only available in secure contexts. For this example we can use localhost, but it is worth being aware of this.
  • At a minimum, we need to pass the current value, or volume, from the AudioWorkletProcessor to the AudioWorkletNode in order to do anything with it.
  • You have to use navigator.getUserMedia to access your computer's microphone (see the note after this list).
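
As a side note, navigator.getUserMedia is itself deprecated in favor of the promise-based navigator.mediaDevices.getUserMedia. Here is a minimal sketch of that variant, reusing the same onMicrophoneGranted and onMicrophoneDenied handlers defined below (this modern form is my addition, not part of the original answer):

// Hypothetical modern variant of activeSound() using the promise-based API
async function activeSoundModern () {
    try {
        // Request microphone access (audio only)
        const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false })
        onMicrophoneGranted(stream)
    } catch (e) {
        onMicrophoneDenied(e)
    }
}

With that noted, here is the main script (index.js):
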
/** Holds the AudioContext object */
let audioContext
// List of colors for the LEDs
const ledColor = [
    "#064dac",
    "#064dac",
    "#064dac",
    "#06ac5b",
    "#15ac06",
    "#4bac06",
    "#80ac06",
    "#acaa06",
    "#ac8b06",
    "#ac5506",
]
let isFirstClick = true
let listening = false

function onMicrophoneDenied() {
    console.log('denied')
}

/**
 * Updates the LEDs
 * depending on the volume detected
 * 
 * @param {number} vol volume value detected from the microphone
 */
function leds(vol) {
    let leds = [...document.getElementsByClassName('led')]
    let range = leds.slice(0, Math.round(vol))

    for (var i = 0; i < leds.length; i++) {
        leds[i].style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e";
        leds[i].style.height = "22px"
    }

    for (var i = 0; i < range.length; i++) {
        range[i].style.boxShadow = `5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset, -2px -2px 30px 0px ${ledColor[i]} inset`;
        range[i].style.height = "25px"
    }
}

/**
 * Sets up the communication between the
 * AudioWorkletNode, the microphone and the AudioWorkletProcessor
 * 
 * @param {MediaStream} stream If the user grants access to the microphone, this gives you
 * the MediaStream object needed in this implementation
 */
async function onMicrophoneGranted(stream) {
    // Instantiate everything only the first
    // time the button is pressed
    if (isFirstClick) {
        // Initialize AudioContext object
        audioContext = new AudioContext()

        // Adding an AudioWorkletProcessor
        // from another script with addModule method
        await audioContext.audioWorklet.addModule('vumeter-processor.js')

        // Creating a MediaStreamSource object
        // and sending a MediaStream object granted by 
        // the user
        let microphone = audioContext.createMediaStreamSource(stream)

        // Creating AudioWorkletNode sending
        // context and name of processor registered
        // in vumeter-processor.js
        const node = new AudioWorkletNode(audioContext, 'vumeter')

        // Listen for messages posted from the AudioWorkletProcessor's
        // process method; this is where you can read
        // the volume level
        node.port.onmessage = event => {
            let _volume = 0
            let _sensitivity = 5 // Adds some sensitivity to our scaling
            if (event.data.volume)
                _volume = event.data.volume;
            leds((_volume * 100) / _sensitivity)
        }

        // Now this is the way to
        // connect our microphone to
        // the AudioWorkletNode and output from audioContext
        microphone.connect(node).connect(audioContext.destination)

        isFirstClick = false
    }

    // Toggle between suspending and resuming
    // the microphone listening when the button is pressed
    let audioButton = document.getElementsByClassName('audio-control')[0]
    if (listening) {
        audioContext.suspend()
        audioButton.style.boxShadow = "-2px -2px 4px 0px #a7a7a73d, 2px 2px 4px 0px #0a0a0e5e"
        audioButton.style.fontSize = "25px"
    } else {
        audioContext.resume()
        audioButton.style.boxShadow = "5px 2px 5px 0px #0a0a0e5e inset, -2px -2px 1px 0px #a7a7a73d inset"
        audioButton.style.fontSize = "24px"
    }

    listening = !listening
}

function activeSound () {
    // Ask the user for permission
    // to use the microphone
    try {
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
        
        navigator.getUserMedia(
            { audio: true, video: false },
            onMicrophoneGranted,
            onMicrophoneDenied
        );
    } catch(e) {
        alert(e)
    }
}

document.getElementById('audio').addEventListener('click', () => {
    activeSound()
})
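
If all you need is the raw volume value, without the LED UI, the wiring above condenses to the following sketch. It reuses the same vumeter-processor.js; the promise-based navigator.mediaDevices.getUserMedia call is an assumption on my part, since the original code uses the callback form:

// Condensed sketch: log the smoothed volume instead of driving LEDs
async function logMicrophoneVolume () {
    // Request microphone access (audio only)
    const stream = await navigator.mediaDevices.getUserMedia({ audio: true, video: false })

    // Create the context, register the processor and build the node pair
    const context = new AudioContext()
    await context.audioWorklet.addModule('vumeter-processor.js')
    const microphone = context.createMediaStreamSource(stream)
    const node = new AudioWorkletNode(context, 'vumeter')

    // Each message from the processor carries the latest smoothed volume
    node.port.onmessage = event => {
        if (event.data.volume !== undefined) console.log(event.data.volume)
    }

    // Connect through to the destination so the graph keeps running
    microphone.connect(node).connect(context.destination)
}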

The next part is the processor implementation (vumeter-processor.js), where the microphone volume is actually computed. Each block of samples is reduced to its RMS level, smoothed, and posted back to the main thread:

const SMOOTHING_FACTOR = 0.8;
const MINIMUM_VALUE = 0.00001;

// This is how to register an AudioWorkletProcessor;
// it has to be registered under a name, in this case
// the name is "vumeter"
registerProcessor('vumeter', class extends AudioWorkletProcessor {

  _volume
  _updateIntervalInMS
  _nextUpdateFrame

  constructor () {
    super();
    this._volume = 0;
    this._updateIntervalInMS = 25;
    this._nextUpdateFrame = this._updateIntervalInMS;
    this.port.onmessage = event => {
      if (event.data.updateIntervalInMS)
        this._updateIntervalInMS = event.data.updateIntervalInMS;
    }
  }

  get intervalInFrames () {
    return this._updateIntervalInMS / 1000 * sampleRate;
  }

  process (inputs, outputs, parameters) {
    const input = inputs[0];

    // Note that the input will be down-mixed to mono; however, if no inputs are
    // connected then zero channels will be passed in.
    if (input.length > 0) {
      const samples = input[0];
      let sum = 0;
      let rms = 0;

      // Calculate the sum of squares.
      for (let i = 0; i < samples.length; ++i)
        sum += samples[i] * samples[i];

      // Calculate the RMS level and update the volume.
      rms = Math.sqrt(sum / samples.length);
      this._volume = Math.max(rms, this._volume * SMOOTHING_FACTOR);

      // Update and sync the volume property with the main thread.
      this._nextUpdateFrame -= samples.length;
      if (this._nextUpdateFrame < 0) {
        this._nextUpdateFrame += this.intervalInFrames;
        this.port.postMessage({volume: this._volume});
      }
    }
    
    return true;
  }
});
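
Note that the processor also listens on its own port: the constructor above reacts to an updateIntervalInMS message, so the main thread can tune how often volume updates are posted. A small sketch, assuming node is the AudioWorkletNode created in index.js:

// Ask the processor to post volume updates every 50 ms instead of the default 25 ms
node.port.postMessage({ updateIntervalInMS: 50 })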

Finally, the HTML, where you can display the detected volume:

<div class="container">
    <span>Microphone</span>
    <div class="volumen-wrapper">
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
                
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
        <div class="led"></div>
    </div>

    <div class="control-audio-wrapper">
        <div id="audio" class="audio-control">&#127908;</div>
    </div>
</div>
<script type="module" src="./index.js"></script>

Here is the result. You can find my implementation on CodePen.

Sources:

  • Enter Audio Worklet
  • Web Audio
  • w3.org/webaudio
