model.compile(loss='poisson', optimizer=adam)
hist = model.fit(C_Bind, Y_Bind, nb_epoch=10, show_accuracy=True, verbose=0,
                 validation_data=(C_Bind, Y_Bind))

# Store the training curves for this context size k.
Hist_Bind[4 * k - 4] = hist.history['acc']
Hist_Bind[4 * k - 3] = hist.history['loss']
Hist_Bind[4 * k - 2] = hist.history['val_acc']
Hist_Bind[4 * k - 1] = hist.history['val_loss']

# -----------------------------------------------------
# Predict the single-symbol mapping for every context, zero-pad the k boundary
# positions, denoise, and record the resulting error rate.
pred_class_Bind = model.predict_classes(C, batch_size=200, verbose=0)
s_nn_hat_Bind = hstack((zeros(k), pred_class_Bind, zeros(k)))
x_nn_hat_Bind = N_DUDE.denoise_with_s(z[i], s_nn_hat_Bind, k)
error_nn_Bind = N_DUDE.error_rate(x, x_nn_hat_Bind)
print '1-D N-DUDE Context Bind=', error_nn_Bind

Error_One_NN_DUDE_Bind[i, k] = error_nn_Bind
X_hat_One_NN_DUDE_Bind[i * k_max + k - 1] = x_nn_hat_Bind

# Estimated loss of the chosen mappings.
s_class = 3
s_nn_hat_cat_Bind = np_utils.to_categorical(s_nn_hat_Bind, s_class)
emp_dist = dot(Z[i * n:(i + 1) * n, :], L[i * alpha_size:(i + 1) * alpha_size, :])
est_loss_nn_dude_Bind = mean(sum(emp_dist * s_nn_hat_cat_Bind, axis=1))
Est_Loss_NN_DUDE_CB[i, k] = est_loss_nn_dude_Bind

One_NN_Bind_End = time.time()
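# Illustrative sketch (not part of the original script): how the estimated-loss
# expression above works. The toy sizes and random inputs below are assumptions
# made only for this example; in the script, emp_dist comes from dot(Z, L) and
# the mapping indices come from the network's predict_classes output.
import numpy as np

_n, _s_class = 8, 3                              # toy sequence length and number of mappings
_emp_dist = np.random.rand(_n, _s_class)         # stand-in for dot(Z, L)
_s_hat = np.random.randint(0, _s_class, _n)      # stand-in for the predicted mapping indices

# One-hot encode the mappings and average the selected entries, mirroring
# mean(sum(emp_dist * s_nn_hat_cat, axis=1)) above.
_s_hat_cat = np.eye(_s_class)[_s_hat]
_est_loss = np.mean(np.sum(_emp_dist * _s_hat_cat, axis=1))
print _est_loss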
# Fine-tune: one additional epoch on (C2, Y2), then one on (C1, Y1).
model.fit(C2, Y2, nb_epoch=1, batch_size=128, show_accuracy=False, verbose=1)
model.fit(C1, Y1, nb_epoch=1, batch_size=128, show_accuracy=False, verbose=1)

pred_class = model.predict_classes(C1, batch_size=128, verbose=0)
s_nn_hat = hstack((zeros(k), pred_class, zeros(k)))
x_nn_hat = N_DUDE.denoise_with_s(z[0], s_nn_hat, k)
error_nn = N_DUDE.error_rate(x, x_nn_hat)
print '1-D N-DUDE trained =', error_nn

Error_One_NN_DUDE[0, k] = error_nn
X_hat_One_NN_DUDE[k_max * 0, :] = x_nn_hat

One_NN_End = time.time()
One_NN_Duration = One_NN_End - One_NN_Start
print 'Time =', One_NN_Duration
print "---------------------------------------------------"

# Save the accumulated result arrays (the argument list continues beyond this excerpt).
res_file = '/HDD/user/yoon/Yoon_SV4/N-DUDE_SV4/NeuralDUDE_Delta_Variation/Result_Plot/Fine_Tune_One_ver5_28'
np.savez(res_file, Error_One_DUDE=Error_One_DUDE, Error_One_NN_DUDE=Error_One_NN_DUDE,
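# Illustrative sketch (not part of the original script): the np.savez call above
# stores each array under its keyword name in a .npz archive. The file name and
# array below are placeholders for this example only.
import numpy as np

_demo = np.zeros((2, 3))
np.savez('results_sketch', Error_demo=_demo)     # writes results_sketch.npz

_loaded = np.load('results_sketch.npz')          # arrays are recovered by keyword
print _loaded['Error_demo'].shape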
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.95, nesterov=True, clipnorm=1.0)

model.compile(loss='poisson', optimizer=adam)
model.fit(C, Y, nb_epoch=10, batch_size=100, show_accuracy=True, verbose=0,
          validation_data=(C, Y))

# -----------------------------------------------------
# Predict mappings for every context, zero-pad the k boundary positions, denoise,
# and record the error rate for this (sequence i, context size k) pair.
pred_class = model.predict_classes(C, batch_size=200, verbose=0)
s_nn_hat = hstack((zeros(k), pred_class, zeros(k)))
x_nn_hat = N_DUDE.denoise_with_s(z[i], s_nn_hat, k)
error_nn = N_DUDE.error_rate(x, x_nn_hat)
print '1-D N-DUDE=', error_nn

Error_One_NN_DUDE[i, k] = error_nn
X_hat_One_NN_DUDE[k_max * i + k - 1, :] = x_nn_hat

# Estimated loss of the chosen mappings.
s_class = 3
s_nn_hat_cat = np_utils.to_categorical(s_nn_hat, s_class)
emp_dist = dot(Z[i * n:(i + 1) * n, :], L[i * alpha_size:(i + 1) * alpha_size, :])
est_loss_nn_dude = mean(sum(emp_dist * s_nn_hat_cat, axis=1))
Est_Loss_One_NN_DUDE[i, k] = est_loss_nn_dude

One_NN_End = time.time()
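# Illustrative sketch (not part of the original script): one plausible way the
# two-sided context matrix C used above could be built from a noisy binary
# sequence. The helper below is hypothetical; the actual construction lives in
# the repository's data-preparation code.
import numpy as np

def _make_context_sketch(z_seq, k):
    # For each interior position, stack the k left and k right noisy symbols
    # (the symbol itself is excluded), giving an (n - 2k, 2k) matrix.
    n_len = len(z_seq)
    ctx = np.zeros((n_len - 2 * k, 2 * k))
    for t in range(k, n_len - k):
        ctx[t - k, :] = np.hstack((z_seq[t - k:t], z_seq[t + 1:t + k + 1]))
    return ctx

_z_demo = np.random.randint(0, 2, 20)
print _make_context_sketch(_z_demo, 3).shape     # (14, 6)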
model.add(Dense(3, init='he_normal'))
model.add(Activation('softmax'))

# Candidate optimizers (only adam is used below).
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=1.5)
adagrad = Adagrad(clipnorm=1.5)
adam = Adam()
adadelta = Adadelta()
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.95, nesterov=True, clipnorm=1.0)

model.compile(loss='poisson', optimizer=adam)
model.fit(C_PD, Y_PD, nb_epoch=10, batch_size=100, show_accuracy=True, verbose=0,
          validation_data=(C_PD, Y_PD))

# -----------------------------------------------------
# Padded-context variant: predictions are made for every position directly,
# so no boundary zeros are prepended or appended to the mapping sequence.
s_nn_hat = model.predict_classes(C_PD, batch_size=200, verbose=0)
x_nn_hat = N_DUDE.denoise_with_s(z[i], s_nn_hat, 1)
error_nn = N_DUDE.error_rate(x, x_nn_hat)
print '1-D N-DUDE Padding=', error_nn

Error_One_NN_DUDE_PD[i, 1] = error_nn
X_hat_One_NN_DUDE_PD[k_max * i, :] = x_nn_hat

# Estimated loss of the chosen mappings.
s_class = 3
s_nn_hat_cat = np_utils.to_categorical(s_nn_hat, s_class)
emp_dist = dot(Z[i * n:(i + 1) * n, :], L[i * alpha_size:(i + 1) * alpha_size, :])
est_loss_nn_dude = mean(sum(emp_dist * s_nn_hat_cat, axis=1))
Est_Loss_One_NN_DUDE_PD[i, 1] = est_loss_nn_dude

### Save the model & weights ###
model_json = model.to_json()
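# Illustrative sketch (not part of the original script): the usual Keras
# JSON-plus-weights pattern that the model_json line above begins. The file
# names are placeholders chosen for this example only.
from keras.models import model_from_json

with open('model_pd.json', 'w') as f:
    f.write(model.to_json())                     # architecture as JSON
model.save_weights('model_pd_weights.h5', overwrite=True)

# Later, rebuild the model and restore its weights.
with open('model_pd.json') as f:
    _restored = model_from_json(f.read())
_restored.load_weights('model_pd_weights.h5')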