- 1. VMD
- 2. VMD Package Installation
- 3. Official VMD Source Code
- 4. Source Code Walkthrough and Usage
- 4.1 Input Parameters
- 4.2 Return Values
- 4.3 Complete Code
- 4.4 Results
- 5. References
1. VMD
In signal processing, variational mode decomposition (VMD) is a signal decomposition and estimation method. While extracting the components, it iteratively searches for the optimal solution of a variational model to determine the center frequency and bandwidth of each component, so it can adaptively partition the signal in the frequency domain and effectively separate the components.
The overall framework of VMD is a variational problem: minimize the sum of the estimated bandwidths of the modes, under the assumption that each "mode" is band-limited around its own center frequency. The variational problem is solved with the alternating direction method of multipliers (ADMM), which repeatedly updates each mode and its center frequency, gradually demodulating each mode to its baseband, until all modes and their center frequencies are extracted together.
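For reference, the constrained variational problem posed in the original paper (see the reference in Section 5) is

$$\min_{\{u_k\},\{\omega_k\}} \; \sum_{k=1}^{K} \left\| \partial_t \left[ \left( \delta(t) + \frac{j}{\pi t} \right) * u_k(t) \right] e^{-j\omega_k t} \right\|_2^2 \quad \text{s.t.} \quad \sum_{k=1}^{K} u_k(t) = f(t),$$

where each mode's analytic signal is shifted down to baseband and its bandwidth is measured as the squared $L^2$ norm of the gradient. ADMM is applied to the augmented Lagrangian of this problem, and that iteration is exactly what the source code in Section 3 implements.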
The goal of VMD is to decompose a real-valued input signal into a discrete number of sub-signals that reproduce the input while each being sparse. Sparsity here means a narrow bandwidth in the spectral domain: each sub-signal is assumed to be concentrated around a center frequency, which is determined together with the decomposition.
VMD has the following advantages:
- the number of decomposition modes is determined by the method itself;
- it reduces the non-stationarity of time series that are highly complex and strongly nonlinear;
- adaptivity: once the number of modes for the given series has been chosen according to the actual situation, the subsequent search and optimization adaptively match the optimal center frequency and finite bandwidth of each mode, allowing effective separation of the intrinsic mode functions (IMFs) and a frequency-domain partition of the signal.
2. VMD Package Installation
pip install vmdpy
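A minimal usage sketch of the installed package follows. It assumes that vmdpy exposes a VMD function with the same signature as the source code listed in the next section; the test signal and parameter values are illustrative only.

# decompose a two-tone test signal with the vmdpy package
import numpy as np
from vmdpy import VMD

t = np.linspace(0, 1, 1000, endpoint=False)
signal = np.cos(2 * np.pi * 5 * t) + 0.5 * np.cos(2 * np.pi * 50 * t)

alpha = 2000   # bandwidth constraint (data-fidelity balancing parameter)
tau = 0.0      # noise tolerance (0 = noise-slack)
K = 2          # number of modes to recover
DC = 0         # no DC mode imposed
init = 1       # center frequencies start uniformly distributed
tol = 1e-7     # convergence tolerance

u, u_hat, omega = VMD(signal, alpha, tau, K, DC, init, tol)
print(u.shape)  # one row per recovered mode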
3. Official VMD Source Code
def VMD(signal, alpha, tau, K, DC, init, tol):
    # ---------------------
    # Input:
    # signal - the time domain signal (1D) to be decomposed
    # alpha  - the balancing parameter of the data-fidelity constraint
    # tau    - time-step of the dual ascent (pick 0 for noise-slack)
    # K      - the number of modes to be recovered
    # DC     - true if the first mode is put and kept at DC (0-freq)
    # init   - 0 = all omegas start at 0
    #          1 = all omegas start uniformly distributed
    #          2 = all omegas initialized randomly
    # tol    - tolerance of convergence criterion; typically around 1e-6
    #
    # Output:
    # -------
    # u      - the collection of decomposed modes
    # u_hat  - spectra of the modes
    # omega  - estimated mode center-frequencies
    import numpy as np
    import matplotlib.pyplot as plt

    # Period and sampling frequency of input signal
    save_T = len(signal)
    fs = 1 / float(save_T)

    # extend the signal by mirroring both ends
    T = save_T
    f_mirror = np.zeros(2 * T)
    f_mirror[0:T//2] = signal[T//2 - 1::-1]
    f_mirror[T//2:3*T//2] = signal
    f_mirror[3*T//2:2*T] = signal[-1:-T//2 - 1:-1]
    f = f_mirror

    # Time domain 0 to T (of mirrored signal)
    T = float(len(f))
    t = np.linspace(1/float(T), 1, int(T), endpoint=True)

    # Spectral domain discretization (normalized frequencies in [-0.5, 0.5))
    freqs = t - 0.5 - 1/T

    # Maximum number of iterations (if not converged yet, then it won't anyway)
    N = 500

    # For future generalizations: individual alpha for each mode
    Alpha = alpha * np.ones(K, dtype=complex)

    # Construct and center f_hat; keep only the positive-frequency half
    f_hat = np.fft.fftshift(np.fft.fft(f))
    f_hat_plus = np.copy(f_hat)
    f_hat_plus[0:int(T)//2] = 0

    # matrix keeping track of every iterant // could be discarded for mem
    u_hat_plus = np.zeros((N, len(freqs), K), dtype=complex)

    # Initialization of omega_k
    omega_plus = np.zeros((N, K), dtype=complex)
    if init == 1:
        for i in range(1, K + 1):
            omega_plus[0, i-1] = (0.5/K) * (i-1)
    elif init == 2:
        omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(1, K)))
    else:
        omega_plus[0, :] = 0
    if DC:
        omega_plus[0, 0] = 0

    # start with empty dual variables
    lamda_hat = np.zeros((N, len(freqs)), dtype=complex)

    # other inits
    uDiff = tol + 2.2204e-16   # update step
    n = 1                      # loop counter
    sum_uk = 0                 # accumulator
    T = int(T)

    # ----------- Main loop for iterative updates
    while uDiff > tol and n < N:
        # update first mode accumulator
        k = 1
        sum_uk = u_hat_plus[n-1, :, K-1] + sum_uk - u_hat_plus[n-1, :, 0]

        # update spectrum of first mode through Wiener filter of residuals
        u_hat_plus[n, :, k-1] = (f_hat_plus - sum_uk - lamda_hat[n-1, :]/2) / (1 + Alpha[k-1]*np.square(freqs - omega_plus[n-1, k-1]))

        # update first omega if not held at 0
        if not DC:
            omega_plus[n, k-1] = np.dot(freqs[T//2:T], np.square(np.abs(u_hat_plus[n, T//2:T, k-1]))) / np.sum(np.square(np.abs(u_hat_plus[n, T//2:T, k-1])))

        # update the remaining modes
        for k in range(2, K + 1):
            # accumulator
            sum_uk = u_hat_plus[n, :, k-2] + sum_uk - u_hat_plus[n-1, :, k-1]
            # mode spectrum
            u_hat_plus[n, :, k-1] = (f_hat_plus - sum_uk - lamda_hat[n-1, :]/2) / (1 + Alpha[k-1]*np.square(freqs - omega_plus[n-1, k-1]))
            # center frequencies
            omega_plus[n, k-1] = np.dot(freqs[T//2:T], np.square(np.abs(u_hat_plus[n, T//2:T, k-1]))) / np.sum(np.square(np.abs(u_hat_plus[n, T//2:T, k-1])))

        # Dual ascent
        lamda_hat[n, :] = lamda_hat[n-1, :] + tau * (np.sum(u_hat_plus[n, :, :], axis=1) - f_hat_plus)

        # loop counter
        n = n + 1

        # converged yet?
        uDiff = 2.2204e-16
        for i in range(1, K + 1):
            uDiff = uDiff + 1/float(T) * np.dot(u_hat_plus[n-1, :, i-1] - u_hat_plus[n-2, :, i-1],
                                                np.conj(u_hat_plus[n-1, :, i-1] - u_hat_plus[n-2, :, i-1]))
        uDiff = np.abs(uDiff)

    # ------ Postprocessing and cleanup
    # discard empty space if converged early
    N = np.minimum(N, n)
    omega = omega_plus[0:N, :]

    # Signal reconstruction: rebuild the full (two-sided) spectrum of each mode
    u_hat = np.zeros((T, K), dtype=complex)
    u_hat[T//2:T, :] = np.squeeze(u_hat_plus[N-1, T//2:T, :])
    u_hat[T//2:0:-1, :] = np.squeeze(np.conj(u_hat_plus[N-1, T//2:T, :]))
    u_hat[0, :] = np.conj(u_hat[-1, :])

    u = np.zeros((K, len(t)), dtype=complex)
    for k in range(1, K + 1):
        u[k-1, :] = np.real(np.fft.ifft(np.fft.ifftshift(u_hat[:, k-1])))

    # remove mirror part
    u = u[:, T//4:3*T//4]

    # recompute spectrum of the trimmed modes
    u_hat = np.zeros((T//2, K), dtype=complex)
    for k in range(1, K + 1):
        u_hat[:, k-1] = np.fft.fftshift(np.fft.fft(u[k-1, :])).conj().T

    # plot the input signal and each recovered mode
    plt.plot(signal)
    plt.show()
    for i in range(1, K + 1):
        plt.plot(np.real(u[i-1, :]))
        plt.show()

    return (u, u_hat, omega)
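In the frequency domain, each pass of the main loop performs the following updates, written here with the code's own sign conventions ($\alpha_k$ is the k-th entry of Alpha, $\hat{\lambda}$ is lamda_hat, and the running accumulator sum_uk realizes the sum over $i \ne k$ in Gauss-Seidel fashion):

$$\hat{u}_k^{\,n+1}(\omega) = \frac{\hat{f}(\omega) - \sum_{i \ne k} \hat{u}_i(\omega) - \hat{\lambda}^{\,n}(\omega)/2}{1 + \alpha_k\,(\omega - \omega_k^{\,n})^2}, \qquad \omega_k^{\,n+1} = \frac{\int_0^{\infty} \omega\,\lvert\hat{u}_k^{\,n+1}(\omega)\rvert^2\,d\omega}{\int_0^{\infty} \lvert\hat{u}_k^{\,n+1}(\omega)\rvert^2\,d\omega},$$

$$\hat{\lambda}^{\,n+1}(\omega) = \hat{\lambda}^{\,n}(\omega) + \tau\Big(\sum_{k} \hat{u}_k^{\,n+1}(\omega) - \hat{f}(\omega)\Big).$$

The first update is a Wiener filter of the residual, which pulls each mode toward a smooth band around its current center frequency; the second is the center of gravity of that mode's power spectrum over the positive frequencies (freqs[T//2:T] in the code); the dual ascent on $\hat{\lambda}$ enforces exact reconstruction when $\tau > 0$.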
4. Source Code Walkthrough and Usage
4.1 Input Parameters
- signal: the (1-D) time-domain signal to be decomposed
- alpha: balancing parameter of the data-fidelity constraint; an empirical value is 1.5-2.0 times the number of sampling points
- tau: noise tolerance (time-step of the dual ascent; pick 0 for noise-slack)
- K: the number of modes to be recovered
- DC: 0 if the composed signal contains no constant (DC) component, 1 if it does
- init: initialization of the center frequencies omega
  0 - all omegas start at 0
  1 - all omegas start uniformly distributed
  2 - all omegas are initialized randomly
- tol: convergence tolerance; it controls the error and hence the accuracy and number of iterations, typically around 1e-6
4.2 Return Values
- u: the collection of decomposed modes
- u_hat: spectra of the modes
- omega: estimated mode center frequencies
4.3 Complete Code
Save the source code from Section 3 as VMD.py in the same directory, then run:
from VMD import VMD
import numpy as np
import math
import matplotlib.pyplot as plt
f_1 = 2.0
f_2 = 24.0
f_3 = 288.0
T=1000
t=np.linspace(1/float(T),1,T,endpoint=True)
pi=np.pi
# print(t)
v_1 =np.cos(2*pi*f_1*t)
v_2 = 1/4.0*np.cos(2*pi*f_2*t)
v_3 = 1/16.0*np.cos(2*pi*f_3*t)
# print(v_1)
# print(v_2)
# print(v_3)
alpha = 2000.0
tau = 0
K = 3
DC = 0
init = 1
tol = 1e-7
signal=v_1+v_2+v_3
(u,u_hat,omega)=VMD(signal, alpha, tau, K, DC, init, tol)
print(u.shape)
print(u_hat.shape)
print(omega.shape)
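As a quick sanity check (a hypothetical addition, not part of the original script), the final normalized center frequencies in omega can be converted back to Hz using the 1000 Hz sampling rate of the test signal:

fs = 1000.0
print(np.abs(omega[-1, :]) * fs)   # should land close to the component frequencies 2, 24 and 288 Hz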
4.4 Results
Running the script prints the shapes of u, u_hat and omega and displays the plots produced inside VMD: the input signal followed by each of the K = 3 recovered modes.
5. References
K. Dragomiretskiy and D. Zosso, "Variational Mode Decomposition," IEEE Transactions on Signal Processing, vol. 62, no. 3, pp. 531-544, 2014. http://dx.doi.org/10.1109/TSP.2013.2288675