```python
# A couple of required imports
import os
import ssl
import urllib.request
import zipfile
from glob import glob

import numpy as np
from matplotlib import pyplot as plt
from tifffile import imread

# Importing the deep learning framework
from csbdeep.models import CARE, Config
from csbdeep.utils import plot_some, plot_history

# Workaround for HTTPS certificate issues when downloading the data.
ssl._create_default_https_context = ssl._create_unverified_context

# Create a folder for our data.
if not os.path.isdir('./data'):
    os.mkdir('./data')

# Check if the data has been downloaded already.
zipPath = "data/SEM.zip"
if not os.path.exists(zipPath):
    # Download and unzip the data.
    data = urllib.request.urlretrieve('https://download.fht.org/jug/n2n_sem_data/n2n_sem_data.zip', zipPath)
    with zipfile.ZipFile(zipPath, 'r') as zip_ref:
        zip_ref.extractall("data")

# Load the SEM training stack: seven noisy acquisitions of the same region,
# with the acquisition axis moved last, giving shape (height, width, 7).
train_imgs = np.moveaxis(imread('data/SEM/train/train.tif'), 0, -1)
print(train_imgs.shape)
```

```
(2971, 1690, 7)
```

```python
# Show the same crop from all seven noisy acquisitions.
plt.figure(figsize=(21, 3))
for i in range(train_imgs.shape[-1]):
    plt.subplot(1, 7, i + 1)
    plt.imshow(train_imgs[1400:1600, 400:600, i], cmap='gray_r', interpolation='nearest')
    plt.axis('off')
```
Now we have to extract training patches from the large SEM images.
```python
def get_patches(img, shape=(128, 128)):
    patches = []
    for y in range(0, img.shape[0] - shape[0], shape[0]):
        for x in range(0, img.shape[1] - shape[1], shape[1]):
            patches.append(img[y:y + shape[0], x:x + shape[1]])
    return np.array(patches)

# Extract 128x128 patches from all seven noisy images.
patches = get_patches(train_imgs)
np.random.shuffle(patches)
train_patches = patches[:270]
val_patches = patches[270:]
print(train_patches.shape)
print(val_patches.shape)
```

```
(270, 128, 128, 7)
(29, 128, 128, 7)
```

Now we assemble `X`, `Y`, `X_val` and `Y_val`.
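Since all seven channels show the same content with independent noise, every ordered pair of distinct channels can serve as a (noisy input, noisy target) pair — the core Noise2Noise idea. A quick sanity check of the pair counts (a sketch, not part of the original notebook; it just confirms the shapes printed after the assembly code below):

```python
# Each patch yields 7 * 6 = 42 ordered (input, target) pairs with i != j.
n_pairs_per_patch = 7 * 6
assert 270 * n_pairs_per_patch == 11340  # expected number of training pairs
assert 29 * n_pairs_per_patch == 1218    # expected number of validation pairs
```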
```python
X = []
Y = []
for patch in train_patches:
    for i in range(7):
        for j in range(7):
            if i != j:
                X.append(patch[..., i])
                Y.append(patch[..., j])
X = np.array(X)[..., np.newaxis]
Y = np.array(Y)[..., np.newaxis]

X_val = []
Y_val = []
for patch in val_patches:
    for i in range(7):
        for j in range(7):
            if i != j:
                X_val.append(patch[..., i])
                Y_val.append(patch[..., j])
X_val = np.array(X_val)[..., np.newaxis]
Y_val = np.array(Y_val)[..., np.newaxis]

# Look at some patches.
idx = 10
plt.figure(figsize=(15, 7.5))
plt.subplot(1, 2, 1)
plt.imshow(X[idx, ..., 0], cmap='gray_r')
plt.axis('off')
plt.title('X')
plt.subplot(1, 2, 2)
plt.imshow(Y[idx, ..., 0], cmap='gray_r')
plt.axis('off')
plt.title('Y')
plt.show()

# Check the train and validation patch shapes.
print(X.shape, Y.shape)
print(X_val.shape, Y_val.shape)
```

```
(11340, 128, 128, 1) (11340, 128, 128, 1)
(1218, 128, 128, 1) (1218, 128, 128, 1)
```

```python
# Now we normalize the data to zero mean and unit standard deviation.
mean = np.mean(np.concatenate([X, Y], axis=-1))
std = np.std(np.concatenate([X, Y], axis=-1))

def normalize(data, mean, std):
    return (data - mean) / std

X = normalize(X, mean, std)
Y = normalize(Y, mean, std)
X_val = normalize(X_val, mean, std)
Y_val = normalize(Y_val, mean, std)
```

```python
# Note: for good results you will want to increase train_epochs to around 100-200.
config = Config(axes='YXC', n_channel_in=1, n_channel_out=1, train_loss='mse',
                unet_kern_size=3, train_steps_per_epoch=200, train_epochs=30)
vars(config)
```

```
{'n_dim': 2,
 'axes': 'YXC',
 'n_channel_in': 1,
 'n_channel_out': 1,
 'train_checkpoint': 'weights_best.h5',
 'train_checkpoint_last': 'weights_last.h5',
 'train_checkpoint_epoch': 'weights_now.h5',
 'probabilistic': False,
 'unet_residual': True,
 'unet_n_depth': 2,
 'unet_kern_size': 3,
 'unet_n_first': 32,
 'unet_last_activation': 'linear',
 'unet_input_shape': (None, None, 1),
 'train_loss': 'mse',
 'train_epochs': 30,
 'train_steps_per_epoch': 200,
 'train_learning_rate': 0.0004,
 'train_batch_size': 16,
 'train_tensorboard': True,
 'train_reduce_lr': {'factor': 0.5, 'patience': 10, 'min_delta': 0}}
```

```python
# Here we create a new model called 'N2N_SEM', stored in the directory 'models'.
model = CARE(config, name='N2N_SEM', basedir='models')
history = model.train(X, Y, (X_val, Y_val))
```

```
Epoch 27/30
200/200 - 8s 42ms/step - loss: 0.5888 - mse: 0.5888 - mae: 0.6116 - val_loss: 0.5935 - val_mse: 0.5935 - val_mae: 0.6122
Epoch 28/30
200/200 - 8s 42ms/step - loss: 0.5949 - mse: 0.5951 - mae: 0.6160 - val_loss: 0.5989 - val_mse: 0.5989 - val_mae: 0.6188
Epoch 29/30
200/200 - 8s 42ms/step - loss: 0.5936 - mse: 0.5936 - mae: 0.6144 - val_loss: 0.5937 - val_mse: 0.5937 - val_mae: 0.6100
Epoch 30/30
200/200 - 8s 42ms/step - loss: 0.5944 - mse: 0.5944 - mae: 0.6161 - val_loss: 0.5919 - val_mse: 0.5919 - val_mae: 0.6128
Loading network weights from 'weights_best.h5'.
```

```python
print(sorted(list(history.history.keys())))
plt.figure(figsize=(16, 5))
plot_history(history, ['loss', 'val_loss'], ['mse', 'val_mse'])
```

```
['loss', 'lr', 'mae', 'mse', 'val_loss', 'val_mae', 'val_mse']
```

Now we predict: apply the trained model to the noisy test images.

```python
def denormalize(img, mean, std):
    return (img * std) + mean

test_imgs = imread('data/SEM/test/test.tif')
print(test_imgs.shape)
```

```
(7, 2959, 1666)
```

```python
# Denoise each test image: normalize, predict, then map back to the original range.
test_denoised = []
for img in test_imgs:
    test_denoised.append(denormalize(model.predict(normalize(img, mean, std), 'YX', normalizer=None), mean, std))

# Top row: noisy inputs; bottom row: denoised outputs.
plt.figure(figsize=(21, 6))
for i in range(test_imgs.shape[0]):
    plt.subplot(2, 7, i + 1)
    plt.imshow(test_imgs[i][1400:1600, 400:600], cmap='gray_r', interpolation='nearest')
    plt.axis('off')
    plt.title('Image #{}'.format(i))
for i in range(test_imgs.shape[0]):
    plt.subplot(2, 7, i + 8)
    plt.imshow(test_denoised[i][1400:1600, 400:600], cmap='gray_r', interpolation='nearest')
    plt.axis('off')
```
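The best weights are saved automatically during training (see `weights_best.h5` in the log above), so the trained model can be restored later without retraining. A minimal sketch, assuming the run above wrote its files under `models/N2N_SEM`; passing `config=None` tells CSBDeep to load the stored configuration and weights:

```python
from csbdeep.models import CARE

# Restore the trained model from disk (config=None loads the saved config and weights).
model = CARE(config=None, name='N2N_SEM', basedir='models')
```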
We can create a ground-truth (GT) image by averaging over all noisy test images. Keep in mind that this GT is still slightly noisy: averaging N independent, zero-mean noise realizations only reduces the noise standard deviation by a factor of √N (about 2.6 for N = 7).

```python
gt = np.mean(test_imgs, axis=0)

idx = 0
plt.figure(figsize=(21, 7))
plt.subplot(1, 3, 1)
plt.imshow(gt[1000:1500, 400:900], cmap='gray_r')
plt.axis('off')
plt.title('Average of All Noisy Images (GT)')
plt.subplot(1, 3, 2)
plt.imshow(test_imgs[idx][1000:1500, 400:900], cmap='gray_r')
plt.axis('off')
plt.title('Input Image #{}'.format(idx))
plt.subplot(1, 3, 3)
plt.imshow(test_denoised[idx][1000:1500, 400:900], cmap='gray_r')
plt.axis('off')
plt.title('Denoised Image #{}'.format(idx))
```

```python
def PSNR(gt, img):
    mse = np.mean((gt - img) ** 2)
    r = np.max(gt) - np.min(gt)
    return 20 * np.log10(r) - 10 * np.log10(mse)

# With a GT image we can compute PSNR values.
for i in range(test_imgs.shape[0]):
    psnr_input = PSNR(gt, test_imgs[i])
    psnr_denoised = PSNR(gt, test_denoised[i])
    print('PSNR Input: {}; PSNR Denoised: {}'.format(np.round(psnr_input, 2), np.round(psnr_denoised, 2)))
```

```
PSNR Input: 10.02; PSNR Denoised: 17.72
PSNR Input: 12.95; PSNR Denoised: 18.74
PSNR Input: 13.46; PSNR Denoised: 19.07
PSNR Input: 12.54; PSNR Denoised: 19.39
PSNR Input: 13.76; PSNR Denoised: 20.04
PSNR Input: 14.65; PSNR Denoised: 20.59
PSNR Input: 18.49; PSNR Denoised: 21.27
```
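For reference, the `PSNR` helper above implements the standard definition, with the dynamic range $r$ taken from the GT image:

$$
\mathrm{PSNR} = 20\,\log_{10}(r) - 10\,\log_{10}(\mathrm{MSE})
= 10\,\log_{10}\!\left(\frac{r^{2}}{\mathrm{MSE}}\right),
\qquad r = \max(\mathrm{gt}) - \min(\mathrm{gt}).
$$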