# 3_audio_filter.py (forked from whitphx/streamlit-webrtc)
import av
import numpy as np
import pydub
import streamlit as st
from streamlit_webrtc import WebRtcMode, webrtc_streamer

from sample_utils.turn import get_ice_servers
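
# Gain in dB applied to the incoming audio.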
gain = st.slider("Gain", -10.0, +20.0, 1.0, 0.05)
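

# Frame callback: receives each incoming audio frame and returns the
# processed frame that will be played back to the client.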
def process_audio(frame: av.AudioFrame) -> av.AudioFrame:
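    # Convert the frame's raw samples into a pydub AudioSegment so the
    # gain can be applied with pydub's high-level API.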
    raw_samples = frame.to_ndarray()
    sound = pydub.AudioSegment(
        data=raw_samples.tobytes(),
        sample_width=frame.format.bytes,
        frame_rate=frame.sample_rate,
        channels=len(frame.layout.channels),
    )
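
    # Apply the gain selected with the slider (pydub works in dB).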
    sound = sound.apply_gain(gain)

    # Ref: https://github.com/jiaaro/pydub/blob/master/API.markdown#audiosegmentget_array_of_samples  # noqa
    channel_sounds = sound.split_to_mono()
    channel_samples = [s.get_array_of_samples() for s in channel_sounds]
    new_samples: np.ndarray = np.array(channel_samples).T
    new_samples = new_samples.reshape(raw_samples.shape)
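
    # Rebuild an av.AudioFrame from the processed samples, keeping the
    # original channel layout and sample rate.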
    new_frame = av.AudioFrame.from_ndarray(new_samples, layout=frame.layout.name)
    new_frame.sample_rate = frame.sample_rate

    return new_frame
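

# SENDRECV: the browser sends microphone audio, process_audio transforms each
# frame, and the processed stream is sent back for playback.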
webrtc_streamer(
    key="audio-filter",
    mode=WebRtcMode.SENDRECV,
    rtc_configuration={"iceServers": get_ice_servers()},
    audio_frame_callback=process_audio,
    async_processing=True,
)