import math
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
%matplotlib inline
tfd = tf.distributions
# generate some Gaussian samples using tf.distributions
mu = tf.constant([0., 0.])
sigma = tf.constant([1., 2.])
gaussian_dist = tfd.Normal(loc=mu, scale=sigma)
x_samples = gaussian_dist.sample(500)
with tf.Session() as sess:
    x = sess.run(x_samples)
plt.scatter(x[:, 0], x[:, 1])
plt.axis('equal')
floatx = tf.float32
complexx = tf.complex64
# define some useful constants
imaginary_unit = tf.constant(1j, dtype=complexx)
zero = tf.constant(0., floatx)
one = tf.constant(1., floatx)
# Creates a steering matrix.
def steering_matrix(sensor_positions, doas, wavelength):
    # Here both `sensor_positions` and `doas` are row vectors.
    k = imaginary_unit * tf.cast(2 * math.pi / wavelength, dtype=complexx)
    return tf.exp(k * tf.cast(tf.reshape(sensor_positions, (-1, 1)) * tf.sin(doas), dtype=complexx))
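# Quick sanity check (an illustrative sketch, not part of the original pipeline):
# at broadside (doa = 0), sin(0) = 0, so every entry of the steering matrix
# should equal 1 + 0j regardless of the sensor positions.
check_positions = tf.constant([0., 0.5, 1., 1.5], dtype=floatx)
check_doas = tf.constant([0.], dtype=floatx)
with tf.Session() as sess:
    print(sess.run(steering_matrix(check_positions, check_doas, 1.)))  # expect all (1+0j)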
# Generates samples from a white circularly-symmetric complex Gaussian distribution.
def sample_complex_white_gaussian(dist, shape, variance):
    # The real and imaginary parts each carry half of the total variance.
    return tf.cast(tf.sqrt(variance / 2), complexx) * tf.complex(dist.sample(shape), dist.sample(shape))
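# Quick check of the scaling (illustrative sketch): averaging |z|^2 over many
# draws should come out near the requested variance, since the real and
# imaginary parts contribute variance / 2 each.
z_check = sample_complex_white_gaussian(tfd.Normal(loc=zero, scale=one), [10000], 2.)
with tf.Session() as sess:
    print(sess.run(tf.reduce_mean(tf.real(z_check * tf.conj(z_check)))))  # expect ~2.0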
# Generates the MUSIC spectrum over a uniform grid of DOAs.
def music(R, sensor_positions, n_doas, wavelength, n_grid_points=360):
    n_sensors = tf.size(sensor_positions)
    # eigendecomposition of the covariance matrix (v: eigenvalues, E: eigenvectors)
    # (TensorFlow orders the eigenvalues from the smallest to the largest)
    v, E = tf.self_adjoint_eig(R)
    # extract the noise subspace: the eigenvectors associated with the
    # smallest n_sensors - n_doas eigenvalues
    En = E[:, 0:(n_sensors - n_doas)]
    # compute the spectrum over the grid
    doa_grid = tf.cast(tf.linspace(-math.pi / 2, math.pi / 2, n_grid_points), floatx)
    A_grid = steering_matrix(sensor_positions, doa_grid, wavelength)
    EhA = tf.matmul(En, A_grid, adjoint_a=True)
    music_sp = tf.real(tf.reduce_sum(tf.conj(EhA) * EhA, axis=0))
    music_sp = tf.reciprocal(music_sp)
    # normalize so that the peak value is one
    music_sp = music_sp / tf.reduce_max(music_sp)
    return doa_grid, music_sp
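# For reference, the quantity computed above is the classic MUSIC
# pseudo-spectrum,
#     P(theta) = 1 / (a(theta)^H E_n E_n^H a(theta)),
# which peaks wherever the steering vector a(theta) is nearly orthogonal to
# the noise subspace spanned by the columns of E_n.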
wavelength = one # normalized wavelength
n_doas = 8
n_sensors = 16
n_snapshots = 100
SNR = 10 # dB
# place sources
doa_min = tf.constant(-math.pi/3, dtype=floatx)
doa_max = tf.constant(math.pi/3, dtype=floatx)
doas = tf.linspace(doa_min, doa_max, n_doas)
# create the ULA
d_0 = wavelength / 2.0 # inter-element spacing
sensor_positions = tf.cast(tf.linspace(zero, n_sensors - one, n_sensors) * d_0, floatx)
# compute the steering matrix
A = steering_matrix(sensor_positions, doas, wavelength)
# generate source samples and noise samples
wn_dist = tfd.Normal(loc=zero, scale=one)
source_power = tf.constant(1.0, dtype=floatx)
noise_power = tf.constant(10.**(-SNR/10.), dtype=floatx)
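# With unit source power, SNR (dB) = 10 * log10(source_power / noise_power),
# so an SNR of 10 dB corresponds to a noise power of 10 ** (-10 / 10) = 0.1.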
S = sample_complex_white_gaussian(wn_dist, [n_doas, n_snapshots], source_power)
N = sample_complex_white_gaussian(wn_dist, [n_sensors, n_snapshots], noise_power)
# compute the sample covariance matrix
Y = tf.matmul(A, S) + N
R = tf.matmul(Y, Y, adjoint_b=True) / n_snapshots
R = 0.5 * (R + tf.conj(tf.transpose(R))) # ensure Hermitian despite floating-point round-off
# MUSIC!
doa_grid, music_sp = music(R, sensor_positions, n_doas, wavelength)
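# Note that everything above only builds the computation graph; the sampling,
# the eigendecomposition, and the spectrum evaluation all happen when the
# session below runs it.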
# run
with tf.Session() as sess:
    true_doas, doa_grid_out, music_sp_out = sess.run([doas, doa_grid, music_sp])
plt.figure()
plt.plot(doa_grid_out, music_sp_out)
plt.stem(true_doas, np.ones(true_doas.shape), '--', basefmt=' ')
plt.ylabel('Normalized Spectrum')
plt.xlabel('Angle (rad)')
plt.legend(['MUSIC', 'True DOAs'], loc=1, bbox_to_anchor=(1.3, 1))
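# A possible follow-up (sketch): extract point estimates of the DOAs from the
# spectrum by keeping the n_doas strongest local maxima. scipy's argrelmax is
# one convenient peak finder; any other would work equally well.
from scipy.signal import argrelmax
peak_indices = argrelmax(music_sp_out)[0]
strongest = peak_indices[np.argsort(music_sp_out[peak_indices])[-n_doas:]]
estimated_doas = np.sort(doa_grid_out[strongest])
print(np.degrees(estimated_doas))  # compare against np.degrees(true_doas)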