Denoising One-Dimensional Signals with a Recurrent Neural Network (Simple Version, Python)
The code is very simple.
import torch
import torch.nn as nn
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
# For running on the GPU, choose the CUDA device instead:
# device = torch.device("cuda")
device = torch.device("cpu")
a = torch.rand(5, 5, device=device)  # place a tensor on the device at creation
a = a.to(device)                     # or move it afterwards with .to()
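# A common optional pattern (not in the original post): select the GPU
# automatically when one is available, otherwise fall back to the CPU
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")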
Make Data
fs = 512
# 10 s of samples at fs = 512 Hz, spanning 10 periods (20*pi radians)
x = np.linspace(0, 20*np.pi * (1 - 1/(10*fs)), fs*10)
y_sin = 0.5*np.sin(x)
plt.plot(x, y_sin)
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x)')
plt.axis('tight')
plt.show()
y_triangle = 0.5*signal.sawtooth(x, 0.5)
plt.plot(x, y_triangle)
plt.xlabel('Phase [rad]')
plt.ylabel('triangle(x)')
plt.axis('tight')
plt.show()
y_saw = 0.5*signal.sawtooth(x, 1)
plt.plot(x, y_saw)
plt.xlabel('Phase [rad]')
plt.ylabel('sawtooth(x)')
plt.axis('tight')
plt.show()
Add Gaussian Noise
# Add Gaussian noise with standard deviation 0.1
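# Optional (not in the original code): seed NumPy's generator first so the
# noisy signals are identical on every run
# np.random.seed(42)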
y_sin_n = y_sin + 0.1*np.random.normal(size=len(x))
y_triangle_n = y_triangle + 0.1*np.random.normal(size=len(x))
y_saw_n = y_saw + 0.1*np.random.normal(size=len(x))
plt.plot(x, y_sin_n)
plt.xlabel('Angle [rad]')
plt.ylabel('sin(x) + noise')
plt.axis('tight')
plt.show()
plt.plot(x, y_triangle_n)
plt.xlabel('Phase [rad]')
plt.ylabel('triangle(x) + noise')
plt.axis('tight')
plt.show()
plt.plot(x, y_saw_n)
plt.xlabel('Phase [rad]')
plt.ylabel('sawtooth(x) + noise')
plt.axis('tight')
plt.show()
Creating the Dataset
def give_part_of_data(x, y, n_samples=10000, sample_size=100):
    # Draw n_samples random windows of length sample_size from the noisy
    # signal x and the corresponding clean target y
    data_inp = np.zeros((n_samples, sample_size))
    data_out = np.zeros((n_samples, sample_size))
    for i in range(n_samples):
        random_offset = np.random.randint(0, len(x) - sample_size)
        data_inp[i, :] = x[random_offset:random_offset + sample_size]
        data_out[i, :] = y[random_offset:random_offset + sample_size]
    return data_inp, data_out
# Train, validation, and test splits
sin_train_in, sin_train_out = give_part_of_data(y_sin_n[0:int(7/10 * len(x))], y_sin[0:int(7/10 * len(x))], 2000, int(len(x)/6))
tri_train_in, tri_train_out = give_part_of_data(y_triangle_n[0:int(7/10 * len(x))], y_triangle[0:int(7/10 * len(x))], 2000, int(len(x)/6))
saw_train_in, saw_train_out = give_part_of_data(y_saw_n[0:int(7/10 * len(x))], y_saw[0:int(7/10 * len(x))], 2000, int(len(x)/6))
sin_val_in, sin_val_out = y_sin_n[int(7/10 * len(x)):int(8/10 * len(x))], y_sin[int(7/10 * len(x)):int(8/10 * len(x))]
tri_val_in, tri_val_out = y_triangle_n[int(7/10 * len(x)):int(8/10 * len(x))], y_triangle[int(7/10 * len(x)):int(8/10 * len(x))]
saw_val_in, saw_val_out = y_saw_n[int(7/10 * len(x)):int(8/10 * len(x))], y_saw[int(7/10 * len(x)):int(8/10 * len(x))]
sin_test_in, sin_test_out = y_sin_n[int(8/10 * len(x)):int(10/10 * len(x))], y_sin[int(8/10 * len(x)):int(10/10 * len(x))]
tri_test_in, tri_test_out = y_triangle_n[int(8/10 * len(x)):int(10/10 * len(x))], y_triangle[int(8/10 * len(x)):int(10/10 * len(x))]
saw_test_in, saw_test_out = y_saw_n[int(8/10 * len(x)):int(10/10 * len(x))], y_saw[int(8/10 * len(x)):int(10/10 * len(x))]
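As a quick check of the split sizes (an illustrative addition; with fs = 512 and 10 s of signal, len(x) = 5120):

print(sin_train_in.shape)  # (2000, 853): 2000 windows of int(len(x)/6) = 853 samples
print(sin_val_in.shape)    # (512,): the 70-80% slice held out for validation
print(sin_test_in.shape)   # (1024,): the final 20% held out for testing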
plt.plot(range(853), sin_train_in[3])   # one noisy training window (int(len(x)/6) = 853 samples)
plt.plot(range(853), sin_train_out[3])  # the corresponding clean target
plt.xlabel('Sample index')
plt.ylabel('sin(x) + noise')
plt.axis('tight')
plt.show()
RNN + Sin
# RNN model
input_dim = 1
hidden_size_1 = 60
hidden_size_2 = 60
output_size = 1
class CustomRNN(nn.Module):
    def __init__(self, input_size, hidden_size_1, hidden_size_2, output_size):
        super().__init__()
        # A single vanilla RNN layer followed by a two-layer tanh read-out
        self.rnn = nn.RNN(input_size=input_size, hidden_size=hidden_size_1, batch_first=True)
        self.linear1 = nn.Linear(hidden_size_1, hidden_size_2)
        self.act1 = nn.Tanh()
        self.linear2 = nn.Linear(hidden_size_2, output_size)
        self.act2 = nn.Tanh()

    def forward(self, x):
        # x: (batch, seq_len, 1); the initial hidden state defaults to zeros
        pred, hidden = self.rnn(x, None)
        pred = self.act2(self.linear2(self.act1(self.linear1(pred))))
        return pred.view(pred.shape[0], -1, 1)
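# Illustrative sanity check (not in the original post): the network maps a batch
# of noisy windows to denoised windows of identical shape, e.g.
# CustomRNN(1, 60, 60, 1)(torch.randn(4, 100, 1)).shape == torch.Size([4, 100, 1])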
model = CustomRNN(input_dim, hidden_size_1, hidden_size_2, output_size)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters())
loss_func = nn.MSELoss()
lr = 1e-2
for t in range(1000):
    # Full-batch training: all 2000 windows go through the network at once
    inp = torch.Tensor(sin_train_in[..., np.newaxis]).to(device)
    out = torch.Tensor(sin_train_out[..., np.newaxis]).to(device)
    pred = model(inp)
    optimizer.zero_grad()
    loss = loss_func(pred, out)
    if t % 20 == 0:
        print(t, loss.item())
    # Decay the learning rate slightly on every iteration
    lr = lr / 1.0001
    optimizer.param_groups[0]['lr'] = lr
    loss.backward()
    optimizer.step()
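The loop above does full-batch gradient descent on all 2000 windows at every step. If memory is a concern, a mini-batch variant built on torch.utils.data is a common alternative (a sketch under the same variable names, not part of the original post):

from torch.utils.data import TensorDataset, DataLoader
dataset = TensorDataset(torch.Tensor(sin_train_in[..., np.newaxis]),
                        torch.Tensor(sin_train_out[..., np.newaxis]))
loader = DataLoader(dataset, batch_size=64, shuffle=True)
for epoch in range(20):
    for inp, out in loader:
        inp, out = inp.to(device), out.to(device)
        optimizer.zero_grad()
        loss = loss_func(model(inp), out)
        loss.backward()
        optimizer.step()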
test_in = sin_test_in
inp = torch.Tensor(test_in[np.newaxis, ..., np.newaxis]).to(device)
pred = model(inp).cpu().detach().numpy()
plt.plot(range(len(sin_test_in)), test_in)
plt.plot(range(len(sin_test_in)), pred[0, :, 0])
plt.show()
original_SNR = np.sum(np.abs(sin_test_out)**2) / np.sum(np.abs(sin_test_in - sin_test_out)**2)
original_SNR_db = 10*np.log10(original_SNR)
print('Original SNR : ', original_SNR)
print('Original SNR dB : ', original_SNR_db)
network_SNR = np.sum(np.abs(sin_test_out)**2) / np.sum(np.abs(pred[0, :, 0] - sin_test_out)**2)
network_SNR_db = 10*np.log10(network_SNR)
print('Network SNR : ', network_SNR)
print('Network SNR dB : ', network_SNR_db)
Original SNR : 12.951857235597608
Original SNR dB : 11.123320486750668
Network SNR : 107.29848229242438
Network SNR dB : 20.305935790331755
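The same SNR computation is repeated verbatim for each waveform below, so it can be factored into a small helper (a sketch; the name snr_db is chosen here for illustration):

def snr_db(clean, noisy):
    # Ratio of signal power to error power, and the same value in decibels
    snr = np.sum(np.abs(clean)**2) / np.sum(np.abs(noisy - clean)**2)
    return snr, 10*np.log10(snr)

With it, each evaluation reduces to snr_db(sin_test_out, sin_test_in) for the noisy input and snr_db(sin_test_out, pred[0, :, 0]) for the network output.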
RNN + Triangular
# Re-use the CustomRNN architecture and the hyperparameters defined above;
# train a fresh model from scratch on the triangle data
model = CustomRNN(input_dim, hidden_size_1, hidden_size_2, output_size)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters())
loss_func = nn.MSELoss()
lr = 1e-2
for t in range(1000):
    inp = torch.Tensor(tri_train_in[..., np.newaxis]).to(device)
    out = torch.Tensor(tri_train_out[..., np.newaxis]).to(device)
    pred = model(inp)
    optimizer.zero_grad()
    loss = loss_func(pred, out)
    if t % 20 == 0:
        print(t, loss.item())
    lr = lr / 1.0001
    optimizer.param_groups[0]['lr'] = lr
    loss.backward()
    optimizer.step()
test_in = tri_test_in
inp = torch.Tensor(test_in[np.newaxis, ..., np.newaxis]).to(device)
pred = model(inp).cpu().detach().numpy()
plt.plot(range(len(tri_test_in)), test_in)
plt.plot(range(len(tri_test_in)), pred[0, :, 0])
plt.show()
original_SNR = np.sum(np.abs(tri_test_out)**2) / np.sum(np.abs(tri_test_in - tri_test_out)**2)
original_SNR_db = 10*np.log10(original_SNR)
print('Original SNR : ', original_SNR)
print('Original SNR dB : ', original_SNR_db)
network_SNR = np.sum(np.abs(tri_test_out)**2) / np.sum(np.abs(pred[0, :, 0] - tri_test_out)**2)
network_SNR_db = 10*np.log10(network_SNR)
print('Network SNR : ', network_SNR)
print('Network SNR dB : ', network_SNR_db)
Original SNR : 9.06282337035853
Original SNR dB : 9.572635159053185
Network SNR : 46.622532666082044
Network SNR dB : 16.685958619136
RNN + Sawtooth
# Re-use the CustomRNN architecture and the hyperparameters defined above;
# train a fresh model from scratch on the sawtooth data
model = CustomRNN(input_dim, hidden_size_1, hidden_size_2, output_size)
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters())
loss_func = nn.MSELoss()
lr = 1e-2
for t in range(1000):
    inp = torch.Tensor(saw_train_in[..., np.newaxis]).to(device)
    out = torch.Tensor(saw_train_out[..., np.newaxis]).to(device)
    pred = model(inp)
    optimizer.zero_grad()
    loss = loss_func(pred, out)
    if t % 20 == 0:
        print(t, loss.item())
    lr = lr / 1.0001
    optimizer.param_groups[0]['lr'] = lr
    loss.backward()
    optimizer.step()
test_in = saw_test_in
inp = torch.Tensor(test_in[np.newaxis, ..., np.newaxis]).to(device)
pred = model(inp).cpu().detach().numpy()
plt.plot(range(len(saw_test_in)), test_in)
plt.plot(range(len(saw_test_in)), pred[0, :, 0])
plt.show()
original_SNR = np.sum(np.abs(saw_test_out)**2) / np.sum(np.abs(saw_test_in - saw_test_out)**2)
original_SNR_db = 10*np.log10(original_SNR)
print('Original SNR : ', original_SNR)
print('Original SNR dB : ', original_SNR_db)
network_SNR = np.sum(np.abs(saw_test_out)**2) / np.sum(np.abs(pred[0, :, 0] - saw_test_out)**2)
network_SNR_db = 10*np.log10(network_SNR)
print('Network SNR : ', network_SNR)
print('Network SNR dB : ', network_SNR_db)
Original SNR : 8.918716305325825
Original SNR dB : 9.50302349708762
Network SNR : 26.97065260659425
Network SNR dB : 14.308914551667852
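Putting the three experiments side by side, the denoising gain is simply the difference between the network and input SNRs in dB (values taken from the printed results above):

# Denoising gain per waveform, from the runs above
for name, orig_db, net_db in [('sin', 11.12, 20.31),
                              ('triangle', 9.57, 16.69),
                              ('sawtooth', 9.50, 14.31)]:
    print(f'{name:9s} gain: +{net_db - orig_db:.2f} dB')

The gain shrinks as the waveform gets less smooth: roughly +9.2 dB for the sine, +7.1 dB for the triangle, and +4.8 dB for the sawtooth with its sharp discontinuities.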