%-------------------------------algorithm processing--------------------------------------------------
% If no reverberation was requested, fall back to a short 0.2 s reverb time
% (rir_generator still needs a nonzero value) and keep only the direct path;
% otherwise let the generator simulate all reflections.
% FIX: the original assigned a bare local `reflectionOrder`, but the
% rir_generator call below reads `setup.reflectionOrder`, so the assignment
% never took effect. Assign into the setup struct instead.
if setup.reverbTime == 0
    setup.reverbTime = 0.2;
    setup.reflectionOrder = 0;   % 0 = direct path only, no reflections
else
    setup.reflectionOrder = -1;  % -1 = maximum (all) reflection orders
end
+ G1 Q& r; n2 D8 A! Y, x" d
% Generate one room impulse response per microphone and convolve the clean
% source signal with each RIR to simulate the per-channel microphone pickups.
% (micPoints/srcPoint/roomDim are transposed to the row layout rir_generator expects.)
rirMatrix = rir_generator(setup.speedOfSound, setup.sampFreq, setup.micPoints', ...
    setup.srcPoint', setup.roomDim', setup.reverbTime, setup.nRirLength, ...
    setup.micType, setup.reflectionOrder, [], [], setup.hpFilterFlag);

% FFT-based FIR filtering of the clean signal with each sensor's RIR.
% NOTE(review): assumes cleanSignal is a single-channel column vector -- confirm
% against multichannelSignalGenerator().
for iSens = 1:setup.nSensors
    tmpCleanSignal(:, iSens) = fftfilt(rirMatrix(iSens, :)', cleanSignal);
end

% Discard the initial filter transient, then remove each channel's DC offset.
mcSignals.clean = tmpCleanSignal(setup.nRirLength:end, :);
setup.nSamples  = length(mcSignals.clean);
mcSignals.clean = mcSignals.clean - ones(setup.nSamples, 1) * mean(mcSignals.clean);
+ K+ \& W; m% `. n* @) u
%------- produce the microphone received clean signals ---------------------------------------------
% The simulated pickups are strongly attenuated, so amplify each channel
% tenfold before writing it to disk. Generalized from 4 hard-coded channels
% to setup.nSensors (produces the same mic_clean1..4.wav files for 4 sensors).
for iSens = 1:setup.nSensors
    micClean = 10 * mcSignals.clean(:, iSens);
    audiowrite(sprintf('mic_clean%d.wav', iSens), micClean, setup.sampFreq);
end
%----------------------------------end--------------------------------------------------
% Generate the diffuse (babble) noise field and scale each channel so the
% measured signal-to-diffuse-noise ratio matches setup.sdnr (in dB).
addpath([cd, '\..\nonstationaryMultichanNoiseGenerator\']);

cleanSignalPowerMeas = var(mcSignals.clean);

mcSignals.diffNoise = generateMultichanBabbleNoise(setup.nSamples, setup.nSensors, ...
    setup.sensorDistance, setup.speedOfSound, setup.noiseField);

% Per-channel rescaling: sqrt(true power / measured power) applied via a
% diagonal matrix so each column (sensor) is scaled independently.
diffNoisePowerMeas  = var(mcSignals.diffNoise);
diffNoisePowerTrue  = cleanSignalPowerMeas / 10^(setup.sdnr / 10);
mcSignals.diffNoise = mcSignals.diffNoise * ...
    diag(sqrt(diffNoisePowerTrue) ./ sqrt(diffNoisePowerMeas));
% White Gaussian sensor noise, scaled per channel so the measured
% signal-to-sensor-noise ratio matches setup.ssnr (in dB).
mcSignals.sensNoise = randn(setup.nSamples, setup.nSensors);
sensNoisePowerMeas  = var(mcSignals.sensNoise);
sensNoisePowerTrue  = cleanSignalPowerMeas / 10^(setup.ssnr / 10);
mcSignals.sensNoise = mcSignals.sensNoise * ...
    diag(sqrt(sensNoisePowerTrue) ./ sqrt(sensNoisePowerMeas));
" D9 X; L. b/ g
% Total noise is diffuse + sensor noise; the observed (noisy) microphone
% signals are the clean pickups plus that total noise.
mcSignals.noise    = mcSignals.diffNoise + mcSignals.sensNoise;
mcSignals.observed = mcSignals.clean + mcSignals.noise;
%------------------------------processing end-----------------------------------------------------------
* O' U4 W c. J, E' o& B2 V6 K* B1 w
%----------------produce the noisy speech of the mics in the specific environment sets------------------
% Amplify the observed (noisy) signals tenfold and write one file per channel.
% Generalized from 4 hard-coded channels to setup.nSensors (produces the same
% diffused_babble_noise1..4_20dB.wav files for 4 sensors). The unused
% l1..l4 = size(...) locals from the original were dropped.
for iSens = 1:setup.nSensors
    noisyMix = 10 * mcSignals.observed(:, iSens);
    audiowrite(sprintf('diffused_babble_noise%d_20dB.wav', iSens), noisyMix, setup.sampFreq);
end
%-----------------------------end-------------------------------------------------------------------------
% NOTE (translated from the original blog commentary): This is the main
% script; running it directly produces the desired audio files. First supply
% your own clean and noise audio, which correspond to the line
%   [cleanSignal,setup.sampFreq] = audioread('..\data\twoMaleTwoFemale20Seconds.wav')
% in multichannelSignalGenerator(), and the line
%   [singleChannelData,samplingFreq] = audioread('babble_8kHz.wav')
% in generateMultichanBabbleNoise(). Simply replace those with the audio
% files you want to process.
" N2 v( o- y+ R 除此之外,还有一些基本实验环境参数设置,包括:麦克风的形状为线性麦克风阵列(该代码只能对线性阵列进行仿真建模,并且还是均匀线性阵列,这个不需要设置);麦克风的类型(micType),有全指向型(omnidirectional),心型指向(cardioid),亚心型指向(subcardioid,不知道咋翻译,请见谅) , 超心型(hypercardioid), 双向型(bidirectional),一般默认是全指向型,如下图1所示;麦克风的数量(nSensors);各麦克风之间的间距(sensorDistance);麦克风阵列的中心位置(arrayCenter),用(x,y,z)坐标来表示;麦克风阵列的高度(arrayHeight),感觉和前面的arrayCenter有所重复,不知道为什么还要设置这么一个参数;目标声源的位置(srcPoint),也是用(x,y,z)坐标来表示;目标声源的高度(srcHeight);麦克风阵列距离目标声源的距离(arrayToSrcDistInt),是在xy平面上的投影距离;房间的大小(roomDim),另外房间的(x,y,z)坐标系如图2所示;房间的混响时间(reverbTime);散漫噪声场的类型(noiseField),分为球形场(spherical)和圆柱形场(cylindrical)。! ~2 g/ r8 m3 z {3 P
图1 麦克风类型图
图2 房间的坐标系

以上便是整个仿真实验环境的参数配置。虽然只能对均匀线性的麦克风阵列进行实验测试,但这对我们进行线阵算法的测试有很大帮助。说到底,这种麦克风阵列环境的音频数据产生方法仍是基于数学模型的仿真,并不能取代实际的硬件实验环境测试,所以要想在工程上实现麦克风阵列算法,仍然避免不了在实际环境中进行测试。最后,希望分享的这套代码能为大家入门麦克风阵列算法提供帮助。
————————————————
版权声明:本文为CSDN博主「Mr_Researcher」的原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接及本声明。
原文链接:https://blog.csdn.net/zhanglu_wind/article/details/79674998