% (Source-text fragment preceding the listing, kept verbatim from the quote:
%  "In the modeling application, [29] had shown that a learning of a function that ...")

%% Load noise templates and synthesize the noisy ECG signal
% 'data/noises_ma.txt' holds three columns: time stamps and two muscle-artifact traces.
[time ma1 ma2] = textread('data/noises_ma.txt','%f %f %f');
random_noise = randn(1,size(time,1))'/5;

% Data-partition indices.
% NOTE(review): wInput, train_length and test_length are defined outside this
% excerpt — presumably the sliding-window length and the train/test split sizes.
data_full_length = size(time,1);
data_train_start = wInput;
data_train_end   = train_length;
data_test_start  = train_length + wInput;
data_test_end    = train_length + test_length;

x = 1:data_full_length;
ECG_clean = ecg1;                 % clean reference ECG (loaded outside this excerpt)
% Uniform white noise in [-0.25, 0.25):
wn1 = 0.5*(rand(data_full_length,1) - 0.5*ones(data_full_length,1));
% 60 Hz sinusoidal interference; the 1/360 factor suggests a 360 Hz sampling
% rate — TODO confirm against the data files.
pw1 = 1/2 * sin(2*pi*x*1/360*60)';
% bw1 (baseline wander?) and em1 (electrode motion?) come from outside this
% excerpt — NOTE(review): confirm they are loaded before this point.
ECG_noisy = ecg1 + ma1 + wn1 + bw1 + pw1 + em1;

% Assign data: the network learns to map noisy input to the clean reference.
input_data     = ECG_noisy;       %(1:data_train_end+layer(1).node+1);
reference_data = ECG_clean;       %(1:data_train_end+layer(1).node+1);

%% Normalization
% Common bounds over both signals so input and reference share one scale.
dmin = min( min(input_data), min(reference_data) );
% FIX: original read max( min(input_data), ... ) — an upper bound must use the
% maximum of the input signal, not its minimum.
dmax = max( max(input_data), max(reference_data) );
input_data     = normalization(input_data,dmin,dmax);
reference_data = normalization(reference_data,dmin,dmax);

%% Initialize the network parameters
last_layer   = size(layer,2);
input_length = layer(1).node;
layer(1).weight(1:layer(2).node) = 1;          % always kept as 1
layer(1).output = zeros(1,layer(1).node);
layer(1).bias   = 0; %rand();                  % no bias for first layer
% Hidden layers: weights and biases drawn uniformly from [-0.5, 0.5).
for i = 2:size(layer,2)-1,
    layer(i).weight = rand(layer(i).node,layer(i-1).node) - 0.5;
    layer(i).output = zeros(1,layer(i).node);
    layer(i).bias   = rand(1,layer(i).node) - 0.5;
end
%layer(total_layer).input (1:layer(total_layer-1).node) = 0;
layer(last_layer).weight = rand(layer(last_layer).node,layer(last_layer-1).node) - 0.5;
layer(last_layer).output = zeros(1,layer(last_layer).node);
layer(last_layer).bias   = rand(1,layer(last_layer).node) - 0.5;
error(1) = 0;

%% Learning
tic;
for i = 1:iteration,
    for jj = data_train_start:data_train_end,
        % Pick a random window end-point j in (wInput, data_train_end].
        % (Rejoined: the original listing split this expression across lines.)
        j = ceil((data_train_end-wInput).*rand(1,1)) + wInput;
        X   = input_data(j-wInput+1:j)';
        cof = wavelet(W, L, X);                         % wavelet decomposition of the window
        layer(1).output = cof( Ainput(WaveParaChoice).len );  % selected coefficients feed layer 1
        reference = reference_data(j-layer(last_layer).node+1:j)';
        % Forward pass through layers 2..last.
        for k = 2:last_layer,
            for m = 1:layer(k).node,
                layer(k).weight_sum(m) = sum(layer(k-1).output.*layer(k).weight(m,:)) - layer(k).bias(m);
                layer(k).output(m) = activation_function(layer(k).weight_sum(m));
            end %m
        end %k
        output_train(j-layer(last_layer).node+1:j) = layer(last_layer).output;
        % FIX: extraction dropped the minus sign ("referenceoutput_train").
        error(j-layer(last_layer).node+1:j) = reference - output_train(j-layer(last_layer).node+1:j);
        layer_old = layer;
        % FIX: extraction garbled the index ("error(jlayer(...)"); restored
        % "j-layer(...)" to match the three identical index expressions above.
        % NOTE(review): the source listing truncates mid-statement here — the
        % remainder of the back-propagation (output-layer delta and weight
        % update) is not visible in this excerpt and is NOT reconstructed.
        layer(last_layer).delta = error(j-layer(last_layer).node+1:j) ...