# NOTE(review): whitespace-mangled fragment -- the original newlines/indentation
# were stripped, fusing many statements onto one line (everything after the
# first '#' token below is dead as a comment). Restore formatting from version
# control before editing logic. What the fragment does (inside a delta-loop `i`
# and a context-size loop `k`, headers outside this view): stores the 1-D DUDE
# reconstruction into row k_max*i+k-1 of X_hat_One_DUDE, records per-(i,k)
# wall-clock durations into DUDE_Time (2 rows per delta: One_DUDE / Total),
# then builds the 1-D N-DUDE training data (C, Y) and a fresh 40-40-40-3 MLP.
# Issues once reformatted:
#  - Keras 1.x API: `init=` (now `kernel_initializer=`), `lr=` (now
#    `learning_rate=`) -- TODO migrate.
#  - Three optimizers (rms/adagrad/adam) are constructed though only one can
#    be passed to compile -- presumably leftovers; verify and drop the unused.
#  - Rebuilding an identical Sequential model every k iteration is wasteful
#    only if k does not change input_dim -- here input_dim=2*k*nb_classes does
#    depend on k, so the rebuild is needed; a model-factory helper would cut
#    the copy-paste.
X_hat_One_DUDE[k_max * i + k - 1, :] = x_dude_hat One_DUDE_End = time.time() One_DUDE_Duration = One_DUDE_End - One_DUDE_Start Total_DUDE_End = time.time() Total_DUDE_Duration = Total_DUDE_End - Total_DUDE_Start DUDE_Time[i * 2, k - 1] = One_DUDE_Duration DUDE_Time[i * 2 + 1, k - 1] = Total_DUDE_Duration ### 1-D N-DUDE ### Total_NN_Start = time.time() One_NN_Start = time.time() C, Y = N_DUDE.make_data_for_One_NN_DUDE( Z[i * n:(i + 1) * n], k, L_new[i * alpha_size:(i + 1) * alpha_size, ], nb_classes, n) model = Sequential() model.add(Dense(40, input_dim=2 * k * nb_classes, init='he_normal')) model.add(Activation('relu')) model.add(Dense(40, init='he_normal')) model.add(Activation('relu')) model.add(Dense(40, init='he_normal')) model.add(Activation('relu')) model.add(Dense(3, init='he_normal')) model.add(Activation('softmax')) rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=1.5) adagrad = Adagrad(clipnorm=1.5) adam = Adam()
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost; the
# inline '###' markers comment out the rest of the physical line). Restore
# formatting before editing. Logic: seeds column 0 of the two bind-bound error
# matrices with the delta values, allocates X_hat result matrices
# (len(delta)*k_max rows x n cols), then loops over delta and k=1..k_max,
# timing the context-bind-bound variant and building both the plain 1-D N-DUDE
# data (C, Y) and the context-bind lower-bound data via
# make_data_for_One_NN_DUDE_Context_Bind_LB, plus a fresh 40-40-40-3 MLP.
# Issues once reformatted:
#  - Python 2 `print` statements -- Py2-only; use print() for Py3.
#  - Keras 1.x `init=` / `lr=` arguments are deprecated -- TODO migrate.
#  - The model-construction block is duplicated verbatim across this file;
#    extract a build_model(k) helper.
#  - Output layer is Dense(3) -- presumably nb_classes == 3 here; verify and
#    use nb_classes instead of the magic constant.
for i in range(len(delta)): Error_One_NN_DUDE_Bind_Bound[i, 0] = delta[i] Error_One_NN_DUDE_Bind_Norm_Bound[i, 0] = delta[i] ### X_hat Mat ### X_hat_One_NN_DUDE_Bind_Bound = np.zeros((len(delta) * k_max, n)) X_hat_One_NN_DUDE_Bind_Norm_Bound = X_hat_One_NN_DUDE_Bind_Bound.copy() for i in range(len(delta)): print "##### delta=%0.2f #####" % delta[i] for k in range(1, k_max + 1): print 'k=', k ### 1-D N-DUDE Context Bind Bound ### One_NN_Bind_Bound_Start = time.time() C, Y = N_DUDE.make_data_for_One_NN_DUDE( Z[i * n:(i + 1) * n], k, L_new[i * alpha_size:(i + 1) * alpha_size, ], nb_classes, n) C_Bind_Bound, Y_Bind_Bound, Y_Bind_Norm_Bound, key, m = N_DUDE.make_data_for_One_NN_DUDE_Context_Bind_LB( x, z[i], Z[i * n:(i + 1) * n, ], k, L_lower, nb_classes, n) model = Sequential() model.add(Dense(40, input_dim=2 * k * nb_classes, init='he_normal')) model.add(Activation('relu')) model.add(Dense(40, init='he_normal')) model.add(Activation('relu')) model.add(Dense(40, init='he_normal')) model.add(Activation('relu')) model.add(Dense(3, init='he_normal')) model.add(Activation('softmax')) rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=1.5)
#print "##### delta=%0.2f #####" % delta[i] for k in range(1, k_max + 1): print "k =", k One_NN_Start = time.time() ### 1-D DUDE ### s_hat, m = DUDE.One_DUDE(z[0], k, delta[0]) x_dude_hat = DUDE.denoise_with_s(z[0], s_hat, k) error_dude = DUDE.error_rate(x, x_dude_hat) print '1-D DUDE =', error_dude Error_One_DUDE[0, k] = error_dude X_hat_One_DUDE[k_max * 0 + k - 1, :] = x_dude_hat ### 1-D N-DUDE ### C1, Y1 = N_DUDE.make_data_for_One_NN_DUDE( Z[0 * n:(0 + 1) * n], k, L_new[0 * alpha_size:(0 + 1) * alpha_size, ], nb_classes, n) C2, Y2 = N_DUDE.make_data_for_One_NN_DUDE( Z[1 * n:(1 + 1) * n], k, L_new[1 * alpha_size:(1 + 1) * alpha_size, ], nb_classes, n) C3, Y3 = N_DUDE.make_data_for_One_NN_DUDE( Z[2 * n:(2 + 1) * n], k, L_new[2 * alpha_size:(2 + 1) * alpha_size, ], nb_classes, n) C4, Y4 = N_DUDE.make_data_for_One_NN_DUDE( Z[3 * n:(3 + 1) * n], k, L_new[3 * alpha_size:(3 + 1) * alpha_size, ], nb_classes, n) C5, Y5 = N_DUDE.make_data_for_One_NN_DUDE( Z[4 * n:(4 + 1) * n], k, L_new[4 * alpha_size:(4 + 1) * alpha_size, ], nb_classes, n) #C6,Y6 = N_DUDE.make_data_for_One_NN_DUDE(Z[5*n:(5+1)*n],k,L_new[5*alpha_size:(5+1)*alpha_size,],nb_classes,n) #C7,Y7 = N_DUDE.make_data_for_One_NN_DUDE(Z[6*n:(6+1)*n],k,L_new[6*alpha_size:(6+1)*alpha_size,],nb_classes,n)
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost; the
# '###' markers comment out the rest of the fused line). Restore formatting
# before editing. Logic: inside a delta-loop `i` (header outside this view),
# for k=1..k_max runs 2-D DUDE on z_two[i] with the given offset, records error
# and reconstruction, then builds 2-D N-DUDE data (C_two, Y_two) and a Keras
# model. Issues once reformatted:
#  - Python 2 `print` statements; Keras 1.x `init=`/`lr=` -- TODO migrate.
#  - This model is 40 -> 3 (single hidden layer), whereas the 1-D fragments use
#    40-40-40-3 -- presumably intentional per experiment, but verify it is not
#    a copy-paste truncation.
#  - Four optimizers (rms/adagrad/adam/adadelta/sgd) constructed; only one can
#    be compiled in -- drop the unused ones.
for k in range(1, k_max + 1): print 'k=', k Total_Start = time.time() ### 2-D DUDE ### Two_DUDE_Start = time.time() s_hat_two, m = DUDE.Two_DUDE(z_two[i], k, delta[i], n, offset) x_dude_hat_two = DUDE.denoise_with_s_Two_DUDE(z_two[i], s_hat_two, k) error_dude_two = DUDE.error_rate(x, x_dude_hat_two) print '2-D DUDE=', error_dude_two Error_Two_DUDE[i, k] = error_dude_two X_hat_Two_DUDE[k_max * i + k - 1, :] = x_dude_hat_two ### 2-D N-DUDE ### C_two, Y_two = N_DUDE.make_data_for_Two_NN_DUDE( P[i], Z[i * n:(i + 1) * n], k, L_new[i * alpha_size:(i + 1) * alpha_size, ], nb_classes, n, offset) model = Sequential() model.add(Dense(40, input_dim=2 * k * nb_classes, init='he_normal')) model.add(Activation('relu')) model.add(Dense(3, init='he_normal')) model.add(Activation('softmax')) rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=1.5) adagrad = Adagrad(clipnorm=1.5) adam = Adam() adadelta = Adadelta() sgd = SGD(lr=0.01, decay=1e-6, momentum=0.95,
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost).
# Restore formatting before editing. Logic: allocates eight result matrices by
# `.copy()` of X_hat_One_DUDE (same shape, independent storage -- using it as a
# shape template), then loops over delta and runs the k=1 "PD" (pseudo-data?)
# 1-D N-DUDE pipeline: build C_PD/Y_PD and a 40-40-40-3 MLP with
# input_dim=2*1*nb_classes. Issues once reformatted:
#  - Python 2 `print` statements; Keras 1.x `init=`/`lr=` -- TODO migrate.
#  - np.zeros_like(X_hat_One_DUDE) (or np.zeros with the explicit shape) would
#    state the intent more clearly than eight chained .copy() calls.
#  - k is hard-coded to 1 here (printed as 'k=1'); presumably a warm-start /
#    baseline pass before the k-loop -- verify against the full script.
X_hat_One_NN_DUDE=X_hat_One_DUDE.copy() X_hat_One_NN_DUDE_PD=X_hat_One_DUDE.copy() X_hat_Two_NN_DUDE=X_hat_One_DUDE.copy() X_hat_Two_NN_DUDE_PD=X_hat_One_DUDE.copy() X_hat_One_NN_DUDE_LB=X_hat_One_DUDE.copy() X_hat_One_NN_DUDE_PD_LB=X_hat_One_DUDE.copy() X_hat_Two_NN_DUDE_LB=X_hat_One_DUDE.copy() X_hat_Two_NN_DUDE_PD_LB=X_hat_One_DUDE.copy() for i in range(len(delta)): print "##### delta=%0.2f #####" % delta[i] print 'k=1' C_PD,Y_PD = N_DUDE.make_data_for_One_NN_DUDE_PD(P[i],Z[i*n:(i+1)*n],1,L_new[i*alpha_size:(i+1)*alpha_size,],nb_classes,n) model=Sequential() model.add(Dense(40,input_dim=2*1*nb_classes,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(3,init='he_normal')) model.add(Activation('softmax')) rms=RMSprop(lr=0.001, rho=0.9, epsilon=1e-06,clipnorm=1.5) adagrad=Adagrad(clipnorm=1.5) adam=Adam() adadelta=Adadelta()
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost; the
# '###' markers comment out the rest of the fused line). Restore formatting
# before editing. Logic: inside loops over delta `i` and context size `k`,
# stores the 2-D DUDE result, records timings into DUDE_Time using THREE rows
# per delta (i*3: One_DUDE, i*3+1: Two_DUDE, i*3+2: Total), then builds 1-D
# N-DUDE data and a 40-40-40-3 MLP. Issues once reformatted:
#  - DUDE_Time layout here is 3 rows/delta vs 2 rows/delta in the 1-D-only
#    fragment elsewhere in this file -- presumably different experiment
#    scripts; verify these do not share a DUDE_Time array.
#  - Keras 1.x `init=`/`lr=` arguments -- TODO migrate; unused optimizers
#    (adagrad/adam/adadelta) should be dropped.
X_hat_Two_DUDE[k_max*i+k-1,:]=x_dude_hat_two Two_DUDE_End=time.time() Two_DUDE_Duration=Two_DUDE_End-Two_DUDE_Start Total_DUDE_End=time.time() Total_DUDE_Duration=Total_DUDE_End-Total_DUDE_Start DUDE_Time[i*3,k-1]=One_DUDE_Duration DUDE_Time[i*3+1,k-1]=Two_DUDE_Duration DUDE_Time[i*3+2,k-1]=Total_DUDE_Duration ### 1-D N-DUDE ### Total_NN_Start=time.time() One_NN_Start=time.time() C,Y = N_DUDE.make_data_for_One_NN_DUDE(Z[i*n:(i+1)*n],k,L_new[i*alpha_size:(i+1)*alpha_size,],nb_classes,n) model=Sequential() model.add(Dense(40,input_dim=2*k*nb_classes,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(3,init='he_normal')) model.add(Activation('softmax')) rms=RMSprop(lr=0.001, rho=0.9, epsilon=1e-06,clipnorm=1.5) adagrad=Adagrad(clipnorm=1.5) adam=Adam() adadelta=Adadelta()
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost; the
# '###' markers comment out the rest of the fused line). Restore formatting
# before editing. Logic: seeds Est_Loss_One_NN_DUDE column 0 with delta,
# allocates X_hat matrices, then (for the first delta only -- range(0, 1)) and
# k=1..k_max builds RNN-N-DUDE data via make_data_for_rnn_n_dude, splits the
# context C into left/right halves along axis 1, and builds two LSTM encoders
# (the right one with go_backwards=True to read its context reversed).
# Issues once reformatted:
#  - `zeros(...)` is used bare here but `np.zeros` elsewhere -- presumably a
#    `from numpy import *` is in scope; prefer the np. prefix consistently.
#  - `for i in range(0, 1)` runs a single iteration -- presumably a debug
#    restriction of the delta loop; confirm before re-enabling all deltas.
#  - Keras 1.x `init=` argument on LSTM -- TODO migrate.
for i in range(len(delta)): Est_Loss_One_NN_DUDE[i, 0] = delta[i] # 1-D N-DUDE ### X_hat Mat ### X_hat_One_NN_DUDE = zeros((len(delta) * k_max, n)) X_hat_One_NN_DUDE_LB = X_hat_One_NN_DUDE.copy() for i in range(0, 1): print "##### delta=%0.2f #####" % delta[i] for k in range(1, k_max + 1): print 'k=', k ### 1-D N-DUDE ### C, Y = N_DUDE.make_data_for_rnn_n_dude( Z[i * n:(i + 1) * n], k, L_new[i * alpha_size:(i + 1) * alpha_size, ], nb_classes, n) C_left = np.split(C, 2, axis=1)[0] C_right = np.split(C, 2, axis=1)[1] left_context = Sequential() left_context.add( LSTM(dim, init='he_normal', return_sequences=False, input_shape=C_left.shape[1:])) right_context = Sequential() right_context.add( LSTM(dim, init='he_normal', return_sequences=False, go_backwards=True,
# NOTE(review): whitespace-mangled fragment (newlines/indentation lost; the
# commented '#print' and '###' markers comment out the rest of the fused
# line). Restore formatting before editing. Logic: a single-iteration delta
# loop (range(0,1)); for k=1..k_max runs 1-D DUDE -- but on the hard-coded
# z[0]/delta[0] and stores into row 0, while the N-DUDE data call below uses
# the loop variable `i`. With i==0 these agree, but if the range is ever
# widened the hard-coded 0s become a latent bug -- index everything by `i`.
# Other issues once reformatted:
#  - Python 2 `print` statements; Keras 1.x `init=`/`lr=` -- TODO migrate.
#  - Duplicated 40-40-40-3 model construction; extract a helper.
for i in range(0,1): #print "##### delta=%0.2f #####" % delta[i] for k in range(1, k_max+1): print "k =",k One_NN_Start=time.time() ### 1-D DUDE ### s_hat,m=DUDE.One_DUDE(z[0],k,delta[0]) x_dude_hat=DUDE.denoise_with_s(z[0],s_hat,k) error_dude=DUDE.error_rate(x,x_dude_hat) print '1-D DUDE =',error_dude Error_One_DUDE[0,k]=error_dude X_hat_One_DUDE[k_max*0+k-1,:]=x_dude_hat ### 1-D N-DUDE ### C,Y = N_DUDE.make_data_for_One_NN_DUDE(Z[i*n:(i+1)*n],k,L_new[i*alpha_size:(i+1)*alpha_size,],nb_classes,n) model=Sequential() model.add(Dense(40,input_dim=2*k*nb_classes,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(40,init='he_normal')) model.add(Activation('relu')) model.add(Dense(3,init='he_normal')) model.add(Activation('softmax')) rms=RMSprop(lr=0.001, rho=0.9, epsilon=1e-06,clipnorm=1.5) adagrad=Adagrad(clipnorm=1.5) adam=Adam() adadelta=Adadelta()
# NOTE(review): whitespace-mangled AND truncated fragment -- newlines were
# stripped and the text ends mid-call ("...Z[8*n:(8+1)*n],k," is incomplete).
# Restore the full original from version control before editing. Logic: for
# k=1..k_max runs 2-D DUDE for noise level `i` (loop header outside this
# view), then builds 2-D N-DUDE data for NINE noise levels via nine
# copy-pasted calls C_two1..C_two9 / Y_two1..Y_two9 that differ only in the
# index 0..8. Issues once reformatted:
#  - Replace the unrolled calls with a loop collecting lists, e.g.
#    Cs[j], Ys[j] = make_data_for_Two_NN_DUDE(P[j], Z[j*n:(j+1)*n], k, ...);
#    numbered variables invite index/copy-paste errors.
#  - Python 2 `print` statements -- Py2-only.
for k in range(1, k_max+1): print "k =",k One_NN_Start=time.time() ### 2-D DUDE ### Two_DUDE_Start=time.time() s_hat_two,m=DUDE.Two_DUDE(z_two[i],k,delta[i],n,offset) x_dude_hat_two=DUDE.denoise_with_s_Two_DUDE(z_two[i],s_hat_two,k) error_dude_two=DUDE.error_rate(x,x_dude_hat_two) print '2-D DUDE =',error_dude_two Error_Two_DUDE[i,k]=error_dude_two X_hat_Two_DUDE[k_max*i+k-1,:]=x_dude_hat_two ### 2-D N-DUDE ### C_two1,Y_two1 = N_DUDE.make_data_for_Two_NN_DUDE(P[0],Z[0*n:(0+1)*n],k, L_new[0*alpha_size:(0+1)*alpha_size,],nb_classes,n,offset) C_two2,Y_two2 = N_DUDE.make_data_for_Two_NN_DUDE(P[1],Z[1*n:(1+1)*n],k, L_new[1*alpha_size:(1+1)*alpha_size,],nb_classes,n,offset) C_two3,Y_two3 = N_DUDE.make_data_for_Two_NN_DUDE(P[2],Z[2*n:(2+1)*n],k, L_new[2*alpha_size:(2+1)*alpha_size,],nb_classes,n,offset) C_two4,Y_two4 = N_DUDE.make_data_for_Two_NN_DUDE(P[3],Z[3*n:(3+1)*n],k, L_new[3*alpha_size:(3+1)*alpha_size,],nb_classes,n,offset) C_two5,Y_two5 = N_DUDE.make_data_for_Two_NN_DUDE(P[4],Z[4*n:(4+1)*n],k, L_new[4*alpha_size:(4+1)*alpha_size,],nb_classes,n,offset) C_two6,Y_two6 = N_DUDE.make_data_for_Two_NN_DUDE(P[5],Z[5*n:(5+1)*n],k, L_new[5*alpha_size:(5+1)*alpha_size,],nb_classes,n,offset) C_two7,Y_two7 = N_DUDE.make_data_for_Two_NN_DUDE(P[6],Z[6*n:(6+1)*n],k, L_new[6*alpha_size:(6+1)*alpha_size,],nb_classes,n,offset) C_two8,Y_two8 = N_DUDE.make_data_for_Two_NN_DUDE(P[7],Z[7*n:(7+1)*n],k, L_new[7*alpha_size:(7+1)*alpha_size,],nb_classes,n,offset) C_two9,Y_two9 = N_DUDE.make_data_for_Two_NN_DUDE(P[8],Z[8*n:(8+1)*n],k,