# Beispiel #1
# 0
        # Train the 1-D N-DUDE mapping network (Keras 1.x API: nb_epoch,
        # show_accuracy). loss='poisson' is presumably the N-DUDE surrogate
        # objective — TODO confirm against the accompanying paper/code.
        model.compile(loss='poisson', optimizer=adam)
        model.fit(C,
                  Y,
                  nb_epoch=10,
                  batch_size=100,
                  show_accuracy=True,
                  verbose=0,
                  validation_data=(C, Y))

        # -----------------------------------------------------

        # Predict one mapping class per context; pad the k boundary
        # positions (which lack a full context window) with class 0,
        # then denoise z[i] with the resulting mapping and score it
        # against the clean sequence x.
        pred_class = model.predict_classes(C, batch_size=200, verbose=0)
        s_nn_hat = hstack((zeros(k), pred_class, zeros(k)))
        x_nn_hat = N_DUDE.denoise_with_s(z[i], s_nn_hat, k)
        error_nn = N_DUDE.error_rate(x, x_nn_hat)

        print '1-D N-DUDE=', error_nn

        # Store per-(run i, window size k) results; X_hat rows are laid
        # out as k_max consecutive k-slots per run i.
        Error_One_NN_DUDE[i, k] = error_nn
        X_hat_One_NN_DUDE[k_max * i + k - 1, :] = x_nn_hat

        # Estimated loss of the chosen mapping: one-hot encode the
        # per-position class (3 mapping classes), weight by the empirical
        # pseudo-loss matrix (Z slice x L slice), average over positions.
        s_class = 3
        s_nn_hat_cat = np_utils.to_categorical(s_nn_hat, s_class)
        emp_dist = dot(Z[i * n:(i + 1) * n, ],
                       L[i * alpha_size:(i + 1) * alpha_size, ])
        est_loss_nn_dude = mean(sum(emp_dist * s_nn_hat_cat, axis=1))
        Est_Loss_One_NN_DUDE[i, k] = est_loss_nn_dude

        # Wall-clock duration of this 1-D N-DUDE pass (start set earlier,
        # outside this fragment).
        One_NN_End = time.time()
        One_NN_Duration = One_NN_End - One_NN_Start
        # NOTE(review): the next two lines are orphaned keyword arguments —
        # the opening `hist = model.fit(C_Bind_Bound, Y_Bind_Bound, ...` of
        # this call was lost when the fragment was extracted; restore it
        # before running.
                         verbose=0,
                         validation_data=(C_Bind_Bound, Y_Bind_Bound))
        # Training curves for this k, 4 consecutive slots per k:
        # acc / loss / val_acc / val_loss.
        Hist_Bind_Bound[4 * k - 4] = hist.history['acc']
        Hist_Bind_Bound[4 * k - 3] = hist.history['loss']
        Hist_Bind_Bound[4 * k - 2] = hist.history['val_acc']
        Hist_Bind_Bound[4 * k - 1] = hist.history['val_loss']
        # -----------------------------------------------------

        # Predict mapping classes on the (unbound) context matrix C,
        # zero-pad the k boundary positions, denoise and score.
        pred_class_Bind_Bound = model.predict_classes(C,
                                                      batch_size=200,
                                                      verbose=0)
        s_nn_hat_Bind_Bound = hstack(
            (zeros(k), pred_class_Bind_Bound, zeros(k)))
        x_nn_hat_Bind_Bound = N_DUDE.denoise_with_s(z[i], s_nn_hat_Bind_Bound,
                                                    k)
        error_nn_Bind_Bound = N_DUDE.error_rate(x, x_nn_hat_Bind_Bound)

        print '1-D N-DUDE Context Bind Bound=', error_nn_Bind_Bound

        # Record error and reconstruction for this (i, k) cell.
        Error_One_NN_DUDE_Bind_Bound[i, k] = error_nn_Bind_Bound
        X_hat_One_NN_DUDE_Bind_Bound[i * k_max + k - 1] = x_nn_hat_Bind_Bound

        # Wall-clock duration of the Context-Bind-Bound pass.
        One_NN_Bind_Bound_End = time.time()
        One_NN_Bind_Bound_Duration = One_NN_Bind_Bound_End - One_NN_Bind_Bound_Start
        ### 1-D N-DUDE Context Bind Normalization Bound ###
        One_NN_Bind_Norm_Bound_Start = time.time()

        # MLP over the one-hot context window: input dim is
        # 2*k positions x nb_classes symbols (Keras 1.x `init=` kwarg).
        model = Sequential()
        model.add(Dense(40, input_dim=2 * k * nb_classes, init='he_normal'))
        model.add(Activation('relu'))
        model.add(Dense(40, init='he_normal'))
        # NOTE(review): fragment is cut here — the remaining layers
        # (output Dense/softmax) and the compile/fit for this model are
        # missing from this chunk.
# Beispiel #3
# 0
        # 2-D N-DUDE: train on the 2-D context data (C_two, Y_two)
        # with the Keras 1.x fit API.
        model.fit(C_two,
                  Y_two,
                  nb_epoch=10,
                  batch_size=100,
                  show_accuracy=True,
                  verbose=0,
                  validation_data=(C_two, Y_two))

        # -----------------------------------------------------

        # Predict per-context mapping classes, reshape them back onto the
        # 2-D layout, denoise and score.
        pred_class_two = model.predict_classes(C_two,
                                               batch_size=200,
                                               verbose=0)
        s_nn_hat_two = N_DUDE.mapping_mat_resize(pred_class_two, k, n)
        x_nn_hat_two = N_DUDE.denoise_with_s_Two_NN_DUDE(z[i], s_nn_hat_two)
        error_nn_two = N_DUDE.error_rate(x, x_nn_hat_two)
        print '2-D N-DUDE=', error_nn_two

        # Record error and reconstruction for this (i, k) cell.
        Error_Two_NN_DUDE_Pre[i, k] = error_nn_two
        X_hat_Two_NN_DUDE_Pre[k_max * i + k - 1, :] = x_nn_hat_two

        # Estimated loss: one-hot the 3 mapping classes and average the
        # empirical pseudo-loss of the selected class over positions.
        s_class_two = 3
        s_nn_hat_cat_two = np_utils.to_categorical(s_nn_hat_two, s_class_two)
        emp_dist_two = dot(Z[i * n:(i + 1) * n, ],
                           L[i * alpha_size:(i + 1) * alpha_size, ])
        est_loss_nn_dude_two = mean(
            sum(emp_dist_two * s_nn_hat_cat_two, axis=1))
        Est_Loss_Two_NN_DUDE_Pre[i, k] = est_loss_nn_dude_two

        ### Save the model & weights ###
        model_json = model.to_json()
        # NOTE(review): model_json is serialized but never written to disk
        # within this fragment — the file-save step is presumably in the
        # missing continuation; verify it exists.
        # Randomized-decision (RD) variant of the 1-D N-DUDE: train the
        # mapping network as usual (Keras 1.x API).
        model.compile(loss='poisson', optimizer=adam)
        model.fit(C,
                  Y,
                  nb_epoch=10,
                  batch_size=100,
                  show_accuracy=True,
                  verbose=0,
                  validation_data=(C, Y))

        # -----------------------------------------------------
        # Instead of argmax classes, take class probabilities, perturb /
        # re-randomize them (RD.rand_new) and draw a new class per position.
        # NOTE(review): RD.rand_new is called with (proba, delta[i])
        # elsewhere in this file — confirm the one-argument call here is
        # intentional.
        proba = model.predict_proba(C, batch_size=200, verbose=0)
        proba_new = RD.rand_new(proba)
        class_new = RD.make_new_class(proba_new)
        s_nn_hat = hstack((zeros(k), class_new, zeros(k)))
        x_nn_hat = N_DUDE.denoise_with_s(z[i], s_nn_hat, k)
        error_nn = N_DUDE.error_rate(x, x_nn_hat)

        print '1-D N-DUDE=', error_nn

        # Record error and reconstruction for this (i, k) cell.
        Error_One_NN_DUDE_RD[i, k] = error_nn
        X_hat_One_NN_DUDE_RD[k_max * i + k - 1, :] = x_nn_hat

        # Estimated loss of the randomized mapping (3 mapping classes).
        s_class = 3
        s_nn_hat_cat = np_utils.to_categorical(s_nn_hat, s_class)
        emp_dist = dot(Z[i * n:(i + 1) * n, ],
                       L[i * alpha_size:(i + 1) * alpha_size, ])
        est_loss_nn_dude = mean(sum(emp_dist * s_nn_hat_cat, axis=1))
        Est_Loss_NN_DUDE[i, k] = est_loss_nn_dude

        ### 1-D N-DUDE Context Bind ###
        # NOTE(review): the next five lines are orphaned keyword arguments —
        # the opening `model.fit(C2,` (or similar) line of this call was
        # lost when the fragment was extracted; restore it before running.
                  Y2,
                  nb_epoch=1,
                  batch_size=128,
                  show_accuracy=False,
                  verbose=1)
        # Fine-tune pass on the (C1, Y1) data, single epoch.
        model.fit(C1,
                  Y1,
                  nb_epoch=1,
                  batch_size=128,
                  show_accuracy=False,
                  verbose=1)

        # Predict mapping classes on C1, zero-pad the k boundary positions,
        # denoise z[0] and score against the clean sequence x.
        pred_class = model.predict_classes(C1, batch_size=128, verbose=0)
        s_nn_hat = hstack((zeros(k), pred_class, zeros(k)))
        x_nn_hat = N_DUDE.denoise_with_s(z[0], s_nn_hat, k)
        error_nn = N_DUDE.error_rate(x, x_nn_hat)
        print '1-D N-DUDE trained =', error_nn
        Error_One_NN_DUDE[0, k] = error_nn
        # NOTE(review): row index is k_max * 0 (always row 0), unlike the
        # k_max * i + k - 1 layout used elsewhere — confirm intentional.
        X_hat_One_NN_DUDE[k_max * 0, :] = x_nn_hat

        # Wall-clock duration of this fine-tuning pass.
        One_NN_End = time.time()
        One_NN_Duration = One_NN_End - One_NN_Start

        print 'Time =', One_NN_Duration
        print "---------------------------------------------------"

        # Persist results for this run (np.savez appends .npz to res_file).
        res_file = '/HDD/user/yoon/Yoon_SV4/N-DUDE_SV4/NeuralDUDE_Delta_Variation/Result_Plot/Fine_Tune_One_ver5_28'
        np.savez(res_file,
                 Error_One_DUDE=Error_One_DUDE,
                 Error_One_NN_DUDE=Error_One_NN_DUDE,
                 X_hat_One_NN_DUDE=X_hat_One_NN_DUDE)
        # Train the 1-D N-DUDE mapping network (Keras 1.x API).
        model.compile(loss='poisson', optimizer=adam)
        model.fit(C,
                  Y,
                  nb_epoch=10,
                  batch_size=100,
                  show_accuracy=True,
                  verbose=0,
                  validation_data=(C, Y))

        # -----------------------------------------------------

        # Predict mapping classes, zero-pad the k boundary positions,
        # denoise z[i] and score against the clean sequence x.
        pred_class = model.predict_classes(C, batch_size=200, verbose=0)
        s_nn_hat = hstack((zeros(k), pred_class, zeros(k)))
        x_nn_hat = N_DUDE.denoise_with_s(z[i], s_nn_hat, k)
        error_nn = N_DUDE.error_rate(x, x_nn_hat)

        print '1-D N-DUDE=', error_nn

        # Record error and reconstruction for this (i, k) cell.
        Error_One_NN_DUDE[i, k] = error_nn
        X_hat_One_NN_DUDE[k_max * i + k - 1, :] = x_nn_hat

        # Estimated loss of the chosen mapping (3 mapping classes).
        s_class = 3
        s_nn_hat_cat = np_utils.to_categorical(s_nn_hat, s_class)
        emp_dist = dot(Z[i * n:(i + 1) * n, ],
                       L[i * alpha_size:(i + 1) * alpha_size, ])
        est_loss_nn_dude = mean(sum(emp_dist * s_nn_hat_cat, axis=1))
        Est_Loss_One_NN_DUDE[i, k] = est_loss_nn_dude

        ### 1-D N-DUDE Padding ###
        # NOTE(review): fragment is cut mid-call — the remaining arguments
        # of make_data_for_One_NN_DUDE_PD are missing from this chunk.
        C_PD, Y_PD = N_DUDE.make_data_for_One_NN_DUDE_PD(
 
 # Candidate optimizers are all constructed, but only `adam` is passed to
 # compile below; the others are unused in this fragment.
 rms=RMSprop(lr=0.001, rho=0.9, epsilon=1e-06,clipnorm=1.5)
 adagrad=Adagrad(clipnorm=1.5)
 adam=Adam()
 adadelta=Adadelta()
 sgd=SGD(lr=0.01,decay=1e-6,momentum=0.95, nesterov=True, clipnorm=1.0)
 
 # Train the 1-D N-DUDE mapping network (Keras 1.x API).
 model.compile(loss='poisson', optimizer=adam)
 model.fit(C,Y,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C, Y))
 
 # -----------------------------------------------------
 
 # Predict mapping classes, zero-pad the k boundary positions, denoise
 # z[i] and score against the clean sequence x.
 pred_class=model.predict_classes(C, batch_size=200, verbose=0)
 s_nn_hat=hstack((zeros(k),pred_class,zeros(k)))
 x_nn_hat=N_DUDE.denoise_with_s(z[i],s_nn_hat,k)
 error_nn=N_DUDE.error_rate(x,x_nn_hat)
 
 print '1-D N-DUDE=', error_nn
 
 # Record error and reconstruction for this (i, k) cell.
 Error_One_NN_DUDE[i,k]=error_nn
 X_hat_One_NN_DUDE[k_max*i+k-1,:]=x_nn_hat
 
 # Estimated loss of the chosen mapping (3 mapping classes).
 s_class=3
 s_nn_hat_cat=np_utils.to_categorical(s_nn_hat,s_class)
 emp_dist=dot(Z[i*n:(i+1)*n,],L[i*alpha_size:(i+1)*alpha_size,])
 est_loss_nn_dude=mean(sum(emp_dist*s_nn_hat_cat,axis=1))
 Est_Loss_One_NN_DUDE[i,k]=est_loss_nn_dude
 
 # Wall-clock duration (start set earlier, outside this fragment).
 One_NN_End=time.time()
 One_NN_Duration=One_NN_End-One_NN_Start
 
 # Sequential fine-tuning over the ten 2-D context datasets
 # (C_two10 ... C_two1), 10 epochs each (Keras 1.x API).
 # NOTE(review): C_two2 and C_two1 are fitted both first and again at the
 # end — confirm the duplicated passes are intentional.
 model.fit(C_two2,Y_two2,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two2, Y_two2))
 model.fit(C_two1,Y_two1,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two1, Y_two1))
 model.fit(C_two10,Y_two10,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two10, Y_two10))
 model.fit(C_two9,Y_two9,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two9, Y_two9))
 model.fit(C_two8,Y_two8,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two8, Y_two8))
 model.fit(C_two7,Y_two7,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two7, Y_two7))
 model.fit(C_two6,Y_two6,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two6, Y_two6))
 model.fit(C_two5,Y_two5,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two5, Y_two5))
 model.fit(C_two4,Y_two4,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two4, Y_two4))
 model.fit(C_two3,Y_two3,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two3, Y_two3))
 model.fit(C_two2,Y_two2,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two2, Y_two2))
 model.fit(C_two1,Y_two1,nb_epoch=10,batch_size=100,show_accuracy=True, verbose=0, validation_data=(C_two1, Y_two1))
 
 # Evaluate on C_two1: reshape predicted classes onto the 2-D layout,
 # denoise z[i] and score against the clean sequence x.
 pred_class_two=model.predict_classes(C_two1, batch_size=200, verbose=0)
 s_nn_hat_two=N_DUDE.mapping_mat_resize(pred_class_two,k,n)
 x_nn_hat_two=N_DUDE.denoise_with_s_Two_NN_DUDE(z[i],s_nn_hat_two) 
 error_nn_two=N_DUDE.error_rate(x,x_nn_hat_two)
 print '2-D N-DUDE =', error_nn_two
 # NOTE(review): results are stored at row index 0 while the denoising
 # above uses z[i] — confirm the index mismatch is intentional.
 Error_Two_NN_DUDE[0,k]=error_nn_two
 X_hat_Two_NN_DUDE[k_max*0+k-1,:]=x_nn_hat_two
 
 # Wall-clock duration (start set earlier, outside this fragment).
 One_NN_End=time.time()
 One_NN_Duration=One_NN_End-One_NN_Start
 
 print 'Time =', One_NN_Duration
 print "---------------------------------------------------"
 
 # Persist results for this run (np.savez appends .npz to res_file).
 res_file='/HDD/user/yoon/Yoon_SV4/N-DUDE_SV4/NeuralDUDE_Delta_Variation/Result_Plot/Fine_Tune_Two_ver5_6'
 np.savez(res_file, Error_Two_DUDE=Error_Two_DUDE,
          Error_Two_NN_DUDE=Error_Two_NN_DUDE,
          X_hat_Two_NN_DUDE=X_hat_Two_NN_DUDE)
        # Context-Bind randomized-decision variant: train on the bound
        # context data (Keras 1.x API).
        # NOTE(review): unlike every other fit call in this file, this one
        # omits batch_size (Keras 1.x default is 32) — confirm intentional.
        model.compile(loss='poisson', optimizer=adam)
        model.fit(C_Bind,
                  Y_Bind,
                  nb_epoch=10,
                  show_accuracy=True,
                  verbose=0,
                  validation_data=(C_Bind, Y_Bind))

        # -----------------------------------------------------

        # Take class probabilities on the unbound contexts C, perturb them
        # with noise level delta[i] (RD.rand_new), re-draw a class per
        # position, then denoise and score.
        proba_bind = model.predict_proba(C, batch_size=200, verbose=0)
        proba_bind_new = RD.rand_new(proba_bind, delta[i])
        class_bind_new = RD.make_new_class(proba_bind_new)
        s_nn_hat_Bind = hstack((zeros(k), class_bind_new, zeros(k)))
        x_nn_hat_Bind = N_DUDE.denoise_with_s(z[i], s_nn_hat_Bind, k)
        error_nn_Bind = N_DUDE.error_rate(x, x_nn_hat_Bind)

        print '1-D N-DUDE Context Bind=', error_nn_Bind

        # Record error and reconstruction for this (i, k) cell.
        Error_One_NN_DUDE_Bind_RD[i, k] = error_nn_Bind
        X_hat_One_NN_DUDE_Bind_RD[k_max * i + k - 1, :] = x_nn_hat_Bind

        # Estimated loss of the randomized bound mapping (3 mapping classes).
        s_class = 3
        s_nn_hat_cat_Bind = np_utils.to_categorical(s_nn_hat_Bind, s_class)
        emp_dist = dot(Z[i * n:(i + 1) * n, ],
                       L[i * alpha_size:(i + 1) * alpha_size, ])
        est_loss_nn_dude_Bind = mean(sum(emp_dist * s_nn_hat_cat_Bind, axis=1))
        Est_Loss_NN_DUDE_RD[i, k] = est_loss_nn_dude_Bind

        # Wall-clock duration of the whole pass (start set earlier,
        # outside this fragment).
        One_End = time.time()
        One = One_End - One_Start