import matplotlib.pyplot as plt
import numpy as np
from radicalsdk.h5dataset import H5DatasetLoader
from radicalsdk.radar.config_v1 import read_radar_params
from radicalsdk.radar.v1 import RadarFrame
data = H5DatasetLoader('../samples/indoor_sample_50.h5')
# Read config and configure RadarFrame object
radar_config = read_radar_params('../samples/indoor_human_rcs.cfg')
rf = RadarFrame(radar_config)
This is the scene that we are looking at:
FRAME_IDX = 2
plt.imshow(data['rgb'][FRAME_IDX][:, :, ::-1])
Capon (Minimum Variance) Beamforming
High-resolution beamforming using Capon's method.
$$ P(\theta) = \frac{1}{v^H R^{-1} v} $$
This is the default beamforming method we use in the SDK. Calling compute_range_azimuth
with no additional arguments computes the covariance matrix using all slow-time chirps (i.e., the chirps normally used for Doppler estimation).
References
- Lorenz, Robert G., and Stephen P. Boyd. "Robust minimum variance beamforming." IEEE Transactions on Signal Processing 53.5 (2005): 1684-1696.
- J. Capon, "High-resolution frequency-wavenumber spectrum analysis," Proceedings of the IEEE, vol. 57, no. 8, pp. 1408-1418, Aug. 1969, doi: 10.1109/PROC.1969.7278.
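To make the spectrum formula concrete, here is a small synthetic sketch (purely illustrative numbers, not tied to the dataset or to the SDK's internal steering-vector convention): an 8-element half-wavelength ULA with a single source at +15°, where the Capon spectrum should peak.
num_ant = 8                                    # illustrative array size, not the radar's actual layout
toy_angles = np.deg2rad(np.arange(-90, 91))    # 181-point angular grid
n = np.arange(num_ant)
A = np.exp(-1j * np.pi * np.outer(np.sin(toy_angles), n))   # steering vectors a(theta), shape (181, 8)
rng = np.random.default_rng(0)
s = rng.standard_normal(256) + 1j * rng.standard_normal(256)         # one source signal
a_src = np.exp(-1j * np.pi * n * np.sin(np.deg2rad(15)))             # source at +15 degrees
x = np.outer(a_src, s) + 0.1 * (rng.standard_normal((num_ant, 256))
                                + 1j * rng.standard_normal((num_ant, 256)))
R = x @ x.conj().T / x.shape[1]                                      # sample covariance matrix
R_inv = np.linalg.inv(R + 1e-7 * np.eye(num_ant))                    # small diagonal loading
P = 1.0 / np.einsum('ij,jk,ik->i', A.conj(), R_inv, A).real          # Capon spectrum per angle
print(np.degrees(toy_angles[np.argmax(P)]))                          # should print a value near 15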
plt.figure(figsize=(5, 10))
plt.imshow(np.log(np.abs(rf.compute_range_azimuth(data['radar'][FRAME_IDX]))))
plt.show()
# Note: this copy of cov_matrix deliberately does not transpose `x` when there are
# fewer chirps than antennas -- that implicit transpose would be undesirable here
# (e.g. when a single chirp is passed in).
def cov_matrix(x):
    """Computes the spatial covariance matrix of input signal x, shape (num_antennas, num_chirps)."""
    Rxx = x @ np.conjugate(x.T)
    Rxx = np.divide(Rxx, x.shape[1])
    return Rxx
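As a quick illustrative check (synthetic values only): antennas go along the rows, chirps along the columns, and the result is a Hermitian num_antennas x num_antennas matrix.
x_demo = (np.arange(8)[:, None] + 1j) * np.ones((1, 64))   # pretend (8 antennas, 64 chirps) input
R_demo = cov_matrix(x_demo)
print(R_demo.shape)                          # (8, 8)
print(np.allclose(R_demo, R_demo.conj().T))  # True -- a covariance matrix is Hermitian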
def forward_backward_avg(Rxx):
    """Performs forward-backward averaging on the given input square matrix.

    Args:
        Rxx (ndarray): A 2D square matrix containing the covariance matrix for the given input data.

    Returns:
        R_fb (ndarray): The 2D square matrix containing the forward-backward averaged covariance matrix.
    """
    assert np.size(Rxx, 0) == np.size(Rxx, 1)

    M = np.size(Rxx, 0)  # number of antenna elements
    # Exchange matrix: identity matrix with its columns flipped left-right
    J = np.fliplr(np.eye(M))
    # Average Rxx with its conjugated, exchanged counterpart
    R_fb = 0.5 * (Rxx + J @ np.conjugate(Rxx) @ J)
    return R_fb
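A quick illustrative check on random synthetic data (sizes are arbitrary): the averaged matrix stays Hermitian and additionally becomes persymmetric, i.e. it equals its own conjugate sandwiched between exchange matrices.
rng = np.random.default_rng(1)
X_demo = rng.standard_normal((4, 32)) + 1j * rng.standard_normal((4, 32))
R_demo = forward_backward_avg(cov_matrix(X_demo))
J_demo = np.fliplr(np.eye(4))
print(np.allclose(R_demo, R_demo.conj().T))                   # still Hermitian
print(np.allclose(R_demo, J_demo @ R_demo.conj() @ J_demo))   # persymmetric: J conj(R_fb) J == R_fb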
def aoa_capon(x, steering_vector, mu=1e-7):
    """Capon (MVDR) spatial spectrum for one range bin.

    Args:
        x: (num_antennas, num_chirps) slow-time samples at one range bin.
        steering_vector: (num_angles, num_antennas) steering vectors.
        mu: diagonal loading factor that keeps the covariance matrix invertible.
    """
    # Calculate the forward-backward averaged, diagonally loaded covariance matrix Rxx
    Rxx = cov_matrix(x)
    uI = np.eye(Rxx.shape[0]) * mu
    Rxx = forward_backward_avg(Rxx)
    Rxx_inv = np.linalg.inv(Rxx + uI)
    first = Rxx_inv @ steering_vector.T
    # Capon spectrum: 1 / (a^H(theta) Rxx^-1 a(theta)) evaluated for every steering angle
    den = np.reciprocal(np.einsum('ij,ij->i', steering_vector.conj(), first.T))
    weights = np.matmul(first, den)
    return den, weights
range_cube = rf.range_cube.copy()
per_chirp_ra = np.zeros((rf.range_nbins, rf.num_vec, rf.doppler_bins), dtype=np.complex128)
for dd in range(rf.doppler_bins):
    for rr in range(rf.range_nbins):
        # A single chirp gives a rank-one covariance; the diagonal loading inside aoa_capon keeps it invertible.
        per_chirp_ra[rr, :, dd], _ = aoa_capon(range_cube[rr, :, dd:dd+1], rf.steering_vec)
plt.figure(figsize=(5, 10))
plt.imshow(np.fliplr(np.flipud(np.log(np.abs(np.sum(per_chirp_ra, axis=2))))))
plt.show()
from mmwave import dsp
range_cube = rf.range_cube.copy()
ra = dsp.aoa_bartlett(rf.steering_vec, range_cube.T, axis=1)
plt.figure(figsize=(5, 10))
plt.imshow(np.fliplr(np.flipud(np.log(np.abs(np.sum(ra, axis=0))).T)))
plt.show()
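For comparison with Capon above, the conventional (Bartlett) beamformer projects the data onto each steering vector without inverting the covariance matrix, $P(\theta) = a^H(\theta) R \, a(\theta)$. Here is a minimal sketch of that formula only (not the mmwave implementation), reusing cov_matrix from above:
def bartlett_spectrum(x, steering_vector):
    """Per-angle Bartlett power P(theta) = a^H(theta) R a(theta) (illustrative helper)."""
    R = cov_matrix(x)
    return np.einsum('ij,jk,ik->i', steering_vector.conj(), R, steering_vector).real
# e.g. bartlett_spectrum(range_cube[rr], rf.steering_vec) for a single range bin rr,
# assuming the same (range, antenna, chirp) cube layout used above.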
range_cube = rf.range_cube.copy()
# Zero-pad the virtual-antenna axis before the azimuth FFT to get a finer angular grid
range_cube_padded = np.pad(range_cube, pad_width=[(0, 0), (86, 87), (0, 0)], mode='constant')
# FFT across the antenna axis; fftshift centers zero spatial frequency (boresight)
azi_fft = np.fft.fftshift(np.fft.fft(range_cube_padded, axis=1), axes=1)
azi_power = np.abs(azi_fft)**2
# Non-coherently average the power over all chirps to get a range-azimuth image
beamformed_img = np.flipud(np.mean(azi_power, axis=2))
plt.figure(figsize=(5, 10))
plt.imshow(np.log(beamformed_img))
plt.show()
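If physical angles are wanted for the azimuth axis of this image, the shifted FFT bins map to spatial frequencies and then to angles. The sketch below assumes half-wavelength virtual-element spacing (the sign/orientation depends on the steering convention, which is also why the image is flipped before plotting):
n_fft = range_cube_padded.shape[1]
f_spatial = np.fft.fftshift(np.fft.fftfreq(n_fft))                    # cycles per element, matching the fftshift above
theta_deg = np.degrees(np.arcsin(np.clip(2 * f_spatial, -1.0, 1.0)))  # sin(theta) = 2 * f for d = lambda / 2
# theta_deg can be used to label the azimuth (horizontal) axis of beamformed_img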
def _noise_subspace(covariance, num_sources):
    """Helper function to extract the noise subspace of a covariance matrix."""
    if covariance.ndim != 2 or covariance.shape[0] != covariance.shape[1]:
        raise ValueError("covariance matrix should be a 2D square matrix.")
    if num_sources >= covariance.shape[0]:
        raise ValueError("number of sources should be less than number of receivers.")
    # np.linalg.eigh returns eigenvalues in ascending order, so the eigenvectors of the
    # (num_receivers - num_sources) smallest eigenvalues span the noise subspace.
    _, v = np.linalg.eigh(covariance)
    return v[:, :-num_sources]
def aoa_music_1D(steering_vec, rx_chirps, num_sources):
    """Implementation of the 1D MUltiple SIgnal Classification (MUSIC) algorithm on a ULA (Uniform Linear Array).

    The current implementation assumes the covariance matrix is not rank deficient and that the ULA spacing is half of the wavelength.

    .. math::
        P_{\\mathrm{MUSIC}}(\\theta) = \\frac{1}{a^{H}(\\theta) \\mathbf{E}_\\mathrm{n} \\mathbf{E}_\\mathrm{n}^{H} a(\\theta)}

    where :math:`\\mathbf{E}_\\mathrm{n}` is the noise subspace and :math:`a` is the steering vector.

    Args:
        steering_vec (~np.ndarray): steering vectors with the shape (FoV/angle_resolution, num_ant).
            FoV/angle_resolution is usually 181. It is generated from the gen_steering_vec() function.
        rx_chirps (~np.ndarray): Output of the 1D range FFT. The shape is (num_ant, num_chirps_per_frame).
        num_sources (int): Number of sources in the scene. Needs to be smaller than num_ant for a ULA.

    Returns:
        (~np.ndarray): The MUSIC spectrum. Sources correspond to (near-)zeros of the denominator and therefore show up as sharp peaks.
    """
    num_antennas = rx_chirps.shape[0]
    assert num_antennas == steering_vec.shape[1], "Mismatch between number of receivers in rx_chirps and steering_vec."
    if num_antennas < num_sources:
        raise ValueError("number of sources should not exceed the number of receivers.")

    R = cov_matrix(rx_chirps)
    noise_subspace = _noise_subspace(R, num_sources)
    # Project the steering vectors onto the noise subspace; true sources are (near-)orthogonal to it.
    v = noise_subspace.T.conj() @ steering_vec.T
    spectrum = np.reciprocal(np.sum(v * v.conj(), axis=0).real)
    return spectrum
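Before applying MUSIC to the radar cube, here is a small synthetic sanity check (arbitrary array size, angles, and noise level, unrelated to the dataset): two sources on an 8-element half-wavelength ULA, for which the spectrum should peak near the true angles.
num_ant, num_chirps = 8, 128
grid = np.arange(-90, 91)                                            # 181-point angle grid in degrees
ant = np.arange(num_ant)
sv_demo = np.exp(-1j * np.pi * np.outer(np.sin(np.deg2rad(grid)), ant))      # (181, num_ant) steering vectors
rng = np.random.default_rng(2)
src = rng.standard_normal((2, num_chirps)) + 1j * rng.standard_normal((2, num_chirps))
a_true = np.exp(-1j * np.pi * np.outer(ant, np.sin(np.deg2rad([-20, 30]))))  # sources at -20 and +30 degrees
rx = a_true @ src + 0.1 * (rng.standard_normal((num_ant, num_chirps))
                           + 1j * rng.standard_normal((num_ant, num_chirps)))
spec = aoa_music_1D(sv_demo, rx, num_sources=2)
print(grid[np.argsort(spec)[-2:]])   # expect values close to -20 and 30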
Here we assume that at each range bin there is only one reflector (num_sources=1):
range_cube = rf.range_cube.copy()
ra_MUSIC = np.zeros((rf.range_nbins, rf.num_vec))
for rr in range(rf.range_nbins):
ra_MUSIC[rr, :] = aoa_music_1D(rf.steering_vec, range_cube[rr, :, :], 1)
plt.figure(figsize=(5, 10))
plt.imshow(np.log(np.flipud(np.fliplr(ra_MUSIC))))
plt.show()