%pylab inline
#%matplotlib qt
from __future__ import division # use so 1/2 = 0.5, etc.
import sk_dsp_comm.sigsys as ss
import sk_dsp_comm.pyaudio_helper as pah
import scipy.signal as signal
import time
import sys
import imp # for module development and reload()
from IPython.display import Audio, display
from IPython.display import Image, SVG
pylab.rcParams['savefig.dpi'] = 100 # default 72
#pylab.rcParams['figure.figsize'] = (6.0, 4.0) # default (6,4)
#%config InlineBackend.figure_formats=['png'] # default for inline viewing
%config InlineBackend.figure_formats=['svg'] # SVG inline viewing
#%config InlineBackend.figure_formats=['pdf'] # render pdf figs for LaTeX
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from threading import Thread
Up to this point we have been doing all of our processing in mono, meaning we have only been processing one channel. While this is fine for some applications, stereo (two-channel) processing becomes much more interesting. Stereo processing can take multiple forms. We can do stereo processing with a mono input, where we copy the mono input and process each side independently. For instance, suppose you want to apply some stereo processing to a guitar. A guitar occupies only one channel, so we take the single-channel input, copy it, and process the original and the copy independently. We can also process true stereo inputs: say we want to do more processing on the guitar on top of our first stereo effect. We can then take in both channels and process them independently. This gives us a lot of options to work with.
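As a rough illustration of this idea (plain NumPy only, not part of the pyaudio_helper API, and the gain values here are arbitrary placeholders), a single mono frame can be copied into two channels that are then processed independently:
import numpy as np

def mono_to_stereo_example(x_mono, gain_left=0.7, gain_right=0.3):
    # Copy the mono frame into two channels and process each side independently
    x_left = x_mono.copy()
    x_right = x_mono.copy()
    y_left = gain_left*x_left      # left-channel processing (here just a gain)
    y_right = gain_right*x_right   # right-channel processing (here just a gain)
    return y_left, y_right

# Example: one 512-sample frame of a 1 kHz tone at fs = 44.1 kHz
fs = 44100
n = np.arange(512)
x_mono = np.cos(2*np.pi*1000*n/fs)
y_left, y_right = mono_to_stereo_example(x_mono)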
As a basic example, let's try some stereo volume control.
We will start by taking the logarithmic volume slider we developed in the previous notebook and applying it to the left and right channels individually.
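The sliders below work in dB and are "cooked" into linear gain values using $10^{\text{dB}/20}$. As a quick standalone check of that conversion (independent of the widgets):
def dB_to_gain(dB):
    # Convert a level in dB to a linear amplitude scale factor
    return 10**(dB/20)

print(dB_to_gain(-6.0))   # ~0.501, the default slider value used below
print(dB_to_gain(-60.0))  # 0.001, the slider minimum
print(dB_to_gain(0.0))    # 1.0, unity gain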
# dB slider from -60 dB to 0.0 dB
volume_dB_left = widgets.FloatSlider(description = 'Left (dB)',
                                     continuous_update = True, # Continuous updates
                                     value = -6.0,
                                     min = -60.0,
                                     max = 0.0,
                                     step = 0.001,
                                     orientation = 'vertical',
                                     readout_format = '.2f')
# dB slider from -60 dB to 0.0 dB
volume_dB_right = widgets.FloatSlider(description = 'Right (dB)',
                                      continuous_update = True, # Continuous updates
                                      value = -6.0,
                                      min = -60.0,
                                      max = 0.0,
                                      step = 0.001,
                                      orientation = 'vertical',
                                      readout_format = '.2f')
# Holds "cooked" values
volume_scale_left = widgets.FloatText(description = 'Left',
                                      continuous_update = True, # Continuous updates
                                      value = 10**(volume_dB_left.value/20),
                                      min = 0.0,
                                      max = 1.0,
                                      step = 0.001)
# Holds "cooked" values
volume_scale_right = widgets.FloatText(description = 'Right',
                                       continuous_update = True, # Continuous updates
                                       value = 10**(volume_dB_right.value/20),
                                       min = 0.0,
                                       max = 1.0,
                                       step = 0.001)
# Cooking function for the left channel:
# any time the left slider changes, the scalar value is re-"cooked"
def on_left_value_change(change):
    volume_scale_left.value = 10**(change['new']/20) # "cook" left slider to scalar

volume_dB_left.observe(on_left_value_change, names = 'value')

# Cooking function for the right channel:
# any time the right slider changes, the scalar value is re-"cooked"
def on_right_value_change(change):
    volume_scale_right.value = 10**(change['new']/20) # "cook" right slider to scalar

volume_dB_right.observe(on_right_value_change, names = 'value')
# Display sliders
widgets.HBox([volume_dB_left,volume_dB_right])
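As a quick sanity check of the observe wiring (purely optional), setting a slider value programmatically should immediately update the corresponding cooked gain:
volume_dB_left.value = -20.0
print(volume_scale_left.value)  # expect approximately 0.1, i.e., 10**(-20/20)
volume_dB_left.value = -6.0     # restore the default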
Now let's create a stereo callback. If we choose the stereo mode in our DSP_io_stream.interactive_stream(numChan = 2) function, then in_data will give us the packed left and right channel data. To unpack this data we can call DSP_io_stream.get_LR(), which returns the left and right channel data for the frame, given the packed stereo input data. Once unpacked, we can process both channels independently; in this case we scale the volume using the volume_scale_left.value and volume_scale_right.value parameters. Once we are done processing, we need to pack the left and right channel data together again, which we can do with the DSP_io_stream.pack_LR() function. After this, if we want to capture samples, we can call DSP_io_stream.DSP_capture_add_samples_stereo(), which captures the left and right channels independently. The rest of the callback is the same as before.
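For intuition, here is a rough sketch of what the unpack/pack steps amount to, assuming the stereo frame arrives as interleaved left/right samples; this is only an illustration, not the actual pyaudio_helper implementation:
import numpy as np

# A toy interleaved stereo frame: L0, R0, L1, R1, ...
packed = np.array([1, 10, 2, 20, 3, 30], dtype=np.int16).astype(float)

# Unpack: every other sample belongs to the left or the right channel
x_left = packed[0::2]    # [1., 2., 3.]
x_right = packed[1::2]   # [10., 20., 30.]

# Independent per-channel processing (here just gains)
y_left = 0.5*x_left
y_right = 0.25*x_right

# Repack by re-interleaving the two processed channels
y = np.empty(2*len(y_left))
y[0::2] = y_left
y[1::2] = y_right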
# Scale right and left channels independently
def callback(in_data, frame_count, time_info, status):
    DSP_IO.DSP_callback_tic()
    # convert byte data to ndarray (frombuffer replaces the deprecated fromstring)
    in_data_nda = np.frombuffer(in_data, dtype=np.int16)
    # separate left and right data
    x_left, x_right = DSP_IO.get_LR(in_data_nda.astype(float32))
    #***********************************************
    # DSP operations here
    y_left = volume_scale_left.value*x_left
    y_right = volume_scale_right.value*x_right
    #***********************************************
    # Pack left and right data together
    y = DSP_IO.pack_LR(y_left,y_right)
    # Typically more DSP code here
    #***********************************************
    # Save data for later analysis
    # accumulate a new frame of samples
    DSP_IO.DSP_capture_add_samples_stereo(y_left,y_right)
    #***********************************************
    # Convert from float back to int16
    y = y.astype(int16)
    DSP_IO.DSP_callback_toc()
    # Convert ndarray back to bytes
    #return (in_data_nda.tobytes(), pyaudio.paContinue)
    return y.tobytes(), pah.pyaudio.paContinue
# Check available ports
pah.available_devices()
N_FRAME = 512
# Create streaming object: use Built-in mic (idx = 0) and output (idx = 1)
DSP_IO = pah.DSP_io_stream(callback,in_idx=0,out_idx=1,fs=44100,
                           frame_length = N_FRAME,Tcapture = 10)
# use thread stream so widget can be used; Tsec = 0 <==> infinite stream
DSP_IO.interactive_stream(Tsec = 20, numChan = 2) # 20 Second stereo stream
# display volume control widgets
widgets.HBox([volume_dB_left,volume_dB_right])
Npts = 441000
Nstart = 200000
figure(2,figsize=(6,5))
subplot(211)
plot(arange(len(DSP_IO.data_capture_left[Nstart:Nstart+Npts]))/44100,
     DSP_IO.data_capture_left[Nstart:Nstart+Npts]/max(DSP_IO.data_capture_left[Nstart:Nstart+Npts]))
title(r'A Portion of the left capture buffer')
ylabel(r'Amplitude')
xlabel(r'Time (s)')
grid();
subplot(212)
plot(arange(len(DSP_IO.data_capture_right[Nstart:Nstart+Npts]))/44100,
     DSP_IO.data_capture_right[Nstart:Nstart+Npts]/max(DSP_IO.data_capture_right[Nstart:Nstart+Npts]))
title(r'A Portion of the right capture buffer')
ylabel(r'Amplitude')
xlabel(r'Time (s)')
grid();
tight_layout();
The plot below is similar to a logic analyzer plot from embedded systems. With a frame length of 512 samples and a sampling rate of 44.1 kHz, we expect a new frame to arrive at the callback every $512\times (1/44.1) = 11.61$ ms. The width of the pulses reflects the time spent in the callback.
512/44.1
DSP_IO.cb_active_plot(330,360)
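If you want a numerical estimate of the callback loading rather than reading it off the plot, something like the following works, assuming the tic/toc timestamps are stored in the lists DSP_IO.DSP_tic and DSP_IO.DSP_toc (check your version of pyaudio_helper if the attribute names differ):
tic = np.array(DSP_IO.DSP_tic)   # assumed callback entry times (s)
toc = np.array(DSP_IO.DSP_toc)   # assumed callback exit times (s)
n = min(len(tic), len(toc))

frame_period = N_FRAME/44100                 # expected frame spacing, ~11.61 ms
mean_cb_time = np.mean(toc[:n] - tic[:n])    # average time spent in the callback

print('Frame period      : %.2f ms' % (1000*frame_period))
print('Mean callback time: %.2f ms' % (1000*mean_cb_time))
print('Approx. loading   : %.1f%%' % (100*mean_cb_time/frame_period))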
Now we have stereo processing capabilities.