Fourier Demos with Audio

In [42]:
import numpy as np
from IPython.display import Audio
import scipy.io.wavfile
import matplotlib.pyplot as plt
%matplotlib inline
from numpy.fft import fft, ifft, fft2, ifft2, fftshift, ifftshift
#from scipy import ndimage, misc
from scipy.signal import gaussian
from joSigProc import *

Examples of Frequency Manipulation (filtering)

First with audio

In [43]:
fs = [15,7]
F1 = np.zeros(128); F2 = F1.copy(); F3 = F1.copy()
F1[1] = 0.9
F2[4] = 0.5
F3[12] = 0.2
f1 = ifft(F1)*len(F1); f2 = ifft(F2)*len(F2); f3 = ifft(F3)*len(F3); 
plt.figure(figsize=fs);
plt.subplot(2,3,1); plt.plot(np.real(f1)); plt.axis([0,128,-2,2])
plt.subplot(2,3,4); plt.stem(F1); plt.axis([0,128,-1,1]);
plt.subplot(2,3,2); plt.plot(np.real(f2)); plt.axis([0,128,-2,2])
plt.subplot(2,3,5); plt.stem(F2); plt.axis([0,128,-1,1]);
plt.subplot(2,3,3); plt.plot(np.real(f3)); plt.axis([0,128,-2,2])
plt.subplot(2,3,6); plt.stem(F3); plt.axis([0,128,-1,1]);
In [44]:
f = f1 + f2 + f3
F = F1 + F2 + F3
plt.figure(figsize=fs)
plt.subplot(1,2,1); plt.plot(np.real(f)); plt.axis([0,128,-2,2])
plt.subplot(1,2,2); plt.stem(F); plt.axis([0,128,-1,1]);

Periodicity of the frequency domain

Why do indices above N/2 correspond to lower (negative) frequencies?
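
One way to see it: the DFT basis functions are only ever evaluated at the integer sample positions $n$, and for integer $n$

$$e^{2\pi i k n / N} = e^{2\pi i (k-N) n / N}\,e^{2\pi i n} = e^{2\pi i (k-N) n / N},$$

so the coefficient at index $k$ is indistinguishable from one at the negative frequency index $k-N$. The cells below compare the sample values (dots) against the continuous cosine $\cos(2\pi k t / N)$ (red) for a few choices of $k$.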

In [45]:
N = 32
tt = np.linspace(0, N, 1000)
In [46]:
# Start with a low frequency index
k = 2
F4 = np.zeros(N);
F4[k] = N
f4 = ifft(F4)
plt.figure(figsize=(14,3))
plt.subplot(1,2,1); plt.plot(tt, np.cos(2.*k*np.pi*tt/N), 'r-'); plt.plot(np.real(f4),'.', markersize=8);
plt.subplot(1,2,2); plt.plot(np.real(f4),'.', markersize=8);
In [47]:
k = 7
F4 = np.zeros(N);
F4[k] = N
f4 = ifft(F4)
plt.figure(figsize=(14,3))
plt.subplot(1,2,1); plt.plot(tt, np.cos(2.*k*np.pi*tt/N), 'r-'); plt.plot(np.real(f4),'.', markersize=8);
plt.subplot(1,2,2); plt.plot(np.real(f4),'.', markersize=8);
In [48]:
k = 16
F4 = np.zeros(N);
F4[k] = N
f4 = ifft(F4)
plt.figure(figsize=(14,3))
plt.subplot(1,2,1); plt.plot(tt, np.cos(2.*k*np.pi*tt/N), 'r-'); plt.plot(np.real(f4),'.', markersize=8);
plt.subplot(1,2,2); plt.plot(np.real(f4),'.', markersize=8);
In [49]:
k = 29
F4 = np.zeros(N);
F4[k] = N
f4 = ifft(F4)
plt.figure(figsize=(14,3))
plt.subplot(1,2,1); plt.plot(tt, np.cos(2.*k*np.pi*tt/N), 'r-'); plt.plot(np.real(f4),'.', markersize=8);
plt.subplot(1,2,2); plt.plot(np.real(f4),'.', markersize=8);

Aliasing

Once your index gets past $\frac{N}{2}$, the Fourier coefficients start to correspond to lower and lower frequencies. The plot below shows what frequency corresponds to each index. This phenomenon of higher frequencies looking like lower frequencies is called "aliasing".
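
As a quick numerical check (a minimal sketch, separate from the cells below): the samples of a cosine at index $k$ are exactly the samples of the aliased cosine at index $k-N$.

N = 32
n = np.arange(N)                    # integer sample positions
k = 29                              # an index past N/2
high = np.cos(2*np.pi*k*n/N)        # samples of the "high-frequency" cosine
low  = np.cos(2*np.pi*(k-N)*n/N)    # samples of the aliased cosine at index k-N = -3
print(np.allclose(high, low))       # True: the two sets of samples are identical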

In [50]:
F = np.zeros(16384)
N = len(F)      # Number of samples in time (or space)
Omega = 8192    # Sound is sampled at 'Omega' samples per second (Hz)
L = N / Omega   # The sound clip will be L seconds long

# To get the corresponding frequencies for the Fourier coefficients,
# we can use these lines.
omega = np.arange(0,N, dtype=float)
omega[omega>=np.floor(N/2)] -= N
omega /= L

plt.plot(omega, 'b--');
plt.plot(omega,'.'); plt.xlabel('Index'); plt.ylabel('Frequency (Hz)');
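
The same index-to-frequency mapping is built into NumPy as np.fft.fftfreq; with sample spacing d = 1/Omega it returns the frequencies in Hz in the same (unshifted) order:

omega_alt = np.fft.fftfreq(N, d=1.0/Omega)   # same mapping as the three lines above
print(np.allclose(omega, omega_alt))         # expected: True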
In [51]:
idx = 4000   # frequency idx/L Hz
F = np.zeros(N)
F[idx] = 1.1
f = np.real(ifft(F))  # IDFT
f = f / max(f)  # normalize so we don't blow out our speakers

# Plot it
plt.plot(omega, abs(F) )
plt.title(str(omega[idx])+' Hz Tone')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Modulus')

#PlotFT(f, Omega, 1)
#scipy.io.wavfile.write('tone.wav', Omega, f)
Audio(f, rate=Omega)
Out[51]:

Frequency Filtering in Music

In [61]:
# Widening filter on voice
Omega, f = scipy.io.wavfile.read('Stamp.wav')
Audio(f[15*Omega:32*Omega,0], rate=Omega)
#scipy.io.wavfile.write('Stamp_15-32.wav', Omega, f[15*Omega:32*Omega,0])
Out[61]:
In [62]:
Omega, f = scipy.io.wavfile.read('Stamp.wav')
f1 = f[32*Omega:42*Omega,0]
g1 = FilterSignal(f1, 0.05*len(f1), band='low')
PlotFT(f1, Omega, fig=1, color='0.75')
PlotFT(g1, Omega, fig=1, clf=False)
Audio(g1, rate=Omega)
Out[62]:
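
FilterSignal comes from the local joSigProc module, which isn't listed here. A minimal sketch of a brick-wall low-pass filter in the same spirit, assuming the second argument is a cutoff in DFT index units (that assumption, and the name lowpass_sketch, are mine, not joSigProc's):

def lowpass_sketch(x, cutoff_idx):
    # Hypothetical stand-in for FilterSignal(x, cutoff_idx, band='low'):
    # zero every DFT coefficient whose signed frequency index exceeds the
    # cutoff, then transform back to the time domain.
    X = fft(x)
    k = np.fft.fftfreq(len(x)) * len(x)   # signed frequency indices
    X[np.abs(k) > cutoff_idx] = 0.
    return np.real(ifft(X))

# e.g. lowpass_sketch(f1, 0.05*len(f1)) should behave roughly like the cell above,
# though FilterSignal's exact conventions (hard vs. soft cutoff) may differ.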
In [63]:
# Narrowing filter on banjo
Omega, f = scipy.io.wavfile.read('TwoHeads.wav')
Audio(f[25*Omega:37*Omega,0], rate=Omega)
Out[63]:

Separating components

In [56]:
ff = f[150*Omega:165*Omega,0].copy()
PlotSignal(ff, Omega)
Audio(ff, rate=Omega)
Out[56]:
In [57]:
FF = np.fft.fftshift(np.fft.fft(ff))
shifted_omega = ShiftedFreqSamples(ff, Omega)
PlotFT(ff, Omega);
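
ShiftedFreqSamples is also from joSigProc; presumably it returns the fftshift-ed frequency axis in Hz, i.e. something along the lines of this sketch (an assumption about the module, not its actual source):

shifted_omega_sketch = fftshift(np.fft.fftfreq(len(ff), d=1.0/Omega))   # hypothetical equivalent, in Hz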
In [58]:
T = 800
G = FF.copy()
G[abs(shifted_omega)>T] = 0.
plt.figure(1); plt.clf()
PlotFT_raw(shifted_omega, abs(G), color='r');
In [59]:
g_low = np.real(ifft(ifftshift(G)))
g = g_low
Audio(np.real(g), rate=Omega)
Out[59]:
In [60]:
T = 1200
G = FF.copy()
G[abs(shifted_omega)<T] = 0.
PlotFT_raw(shifted_omega, abs(G), fig=1, clf=False)
g_high = np.real(ifft(ifftshift(G)))
g = g_high
Audio(np.real(g), rate=Omega)
Out[60]:
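
As a sanity check on the separation (a sketch reusing FF, shifted_omega, and ff from above): with a single shared threshold the low- and high-pass masks are complementary, so the two parts sum back to the original clip.

T_common = 1000                      # one shared cutoff (Hz) for both bands
G_low  = FF.copy();  G_low[np.abs(shifted_omega) >  T_common] = 0.
G_high = FF.copy();  G_high[np.abs(shifted_omega) <= T_common] = 0.
recon = np.real(ifft(ifftshift(G_low + G_high)))
print(np.max(np.abs(recon - ff)))    # tiny: just floating-point round-off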

Frequency Shifting

In [64]:
Omega, f = scipy.io.wavfile.read('handel.wav')
F = fftshift(fft(f))
shifted_omega = ShiftedFreqSamples(f, Omega)
t = TimeSamples(f, Omega)
print('Sampling rate is '+str(Omega)+' Hz')
print('Number of samples is '+str(np.shape(f)))
plt.plot(shifted_omega, fftshift(abs(F)))
plt.title('Frequency Domain')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Modulus');
Audio(f, rate=Omega)
Sampling rate is 8192 Hz
Number of samples is (73113,)
Out[64]:

Shift a bunch of Fourier coefficients. Only the positive-frequency band is copied, so the shifted spectrum is no longer conjugate-symmetric; taking the real part of the inverse DFT below discards the resulting imaginary component.

In [65]:
Fshift = F.copy()   # work on a copy so the original spectrum F is left untouched
tau_1 = 50          # lower edge of the band to shift (Hz)
tau_2 = 1200        # upper edge of the band to shift (Hz)
shift_Hz = 400      # how far up to shift the band (Hz)
L = t[-1]           # duration of the clip (seconds)
shift = int(shift_Hz*L)   # the shift expressed in DFT index units (1/L Hz per index)
print(shift)
3569
In [66]:
idx1 = list(shifted_omega>=tau_1).index(True)   # first index with frequency >= tau_1 Hz
idx2 = list(shifted_omega<tau_2).index(False)   # first index with frequency >= tau_2 Hz
In [67]:
idx = np.arange(idx1,idx2)   # indices of the band between tau_1 and tau_2 Hz
Fshift[idx+shift] = F[idx]   # copy that band 'shift' indices (i.e. shift_Hz Hz) higher
In [68]:
g = ifft(ifftshift(Fshift))
Audio(np.real(g), rate=Omega)
Out[68]: