        Scalar test loss (if the model has no metrics) or a list of
        scalars (if the model computes other metrics). The attribute
        `model.metrics_names` gives the display labels for the scalar
        outputs.
        '''
        Ypred, Y = self.predict(generator, n_eval)
        run_eval(Ypred, Y, l, pbFlag)


def readT(rootpath, file_train):
    # Return the list of sample lines from a split file.
    with open(rootpath + file_train, 'r') as f:
        return f.readlines()


if __name__ == '__main__':
    forward_Model = MixtureModel()
    trainingT = readT(ROOTPATH, train_txt)
    forward_Model.fit(ROOTPATH, trainingT, test_txt,
                      learning_rate=LEARNING_RATE, itmax=ITER,
                      validation=validationRatio, subsampling=ssRatio)
    (_, _), (gen_test, N_test) = load_data_generator(
        ROOTPATH, train_txt[:], test_txt,
        validation=1.0, subsampling=ssRatio)
    forward_Model.evaluate((gen_test, N_test), WIDTH)
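# Hedged sketch of the loss/metrics convention documented in evaluate() above:
# a compiled Keras model returns a scalar when it only tracks the loss, and a
# list aligned with `model.metrics_names` otherwise. `some_model`, `X_val`
# and `Y_val` are illustrative placeholders, not names from this repository.
def print_named_scores(some_model, X_val, Y_val):
    scores = some_model.evaluate(X_val, Y_val, batch_size=32)
    if not isinstance(scores, list):
        scores = [scores]
    for name, value in zip(some_model.metrics_names, scores):
        print name, value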
        '''
        features_test, target_test = extract_XY_generator(
            self.network, generator, n_eval)
        gllim_predict = self.gllim.predict_high_low(features_test)
        run_eval(gllim_predict, target_test, l, pbFlag)


if __name__ == '__main__':
    deep_gllim = DeepGllim(k=GLLIM_K, PCA=FEATURES_SIZE)
    train_txt = sys.argv[1]
    test_txt = sys.argv[2]
    (gen_training, N_train), (gen_test, N_test) = load_data_generator(
        ROOTPATH, train_txt, test_txt, gllim=None, FT=False)
    deep_gllim.fit((gen_training, N_train),
                   learning_rate=LEARNING_RATE, it=ITER, f=train_txt)
    predictions = deep_gllim.predict((gen_test, N_test))
    deep_gllim.evaluate((gen_test, N_test), WIDTH, PB_FLAG)
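# Hedged sketch (not this repository's implementation) of the mixture-of-
# affine-experts mean that predict_high_low() computes once inversion() has
# been called: E[y | x] = sum_k w_k(x) (A_k x + b_k), with gating weights
# w_k(x) proportional to pi_k * N(x; c_k, Gamma_k). All parameter names
# below are illustrative.
import numpy as np

def gllim_forward_mean_sketch(x, pis, cs, Gammas, As, bs):
    K = len(pis)
    log_w = np.empty(K)
    for k in range(K):
        diff = x - cs[k]
        _, logdet = np.linalg.slogdet(Gammas[k])
        log_w[k] = np.log(pis[k]) - 0.5 * (
            logdet + diff.dot(np.linalg.solve(Gammas[k], diff)))
    # Normalize the gating weights in log space for numerical stability.
    w = np.exp(log_w - log_w.max())
    w /= w.sum()
    return sum(w[k] * (As[k].dot(x) + bs[k]) for k in range(K))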
    def fit(self, train_txt, test_txt, learning_rate=0.1, it=2):
        '''Trains the model for a fixed number of iterations.

        # Arguments
            train_txt: text file listing the training samples.
            test_txt: text file listing the test samples.
            learning_rate: float, learning rate of the fine-tuning step.
            it: integer, number of iterations of the algorithm.
        '''
        start_time_training = time.time()
        print "Training Deep Gllim"
        (generator_training, n_train), (generator_val, n_val), \
            (generator_test, n_test) = load_data_generator(
                ROOTPATH, train_txt, test_txt, validation=Valperc)
        print "n_train size:", n_train
        features_training, target_training = extract_XY_generator(
            self.network, generator_training, n_train)
        print "features size:", features_training.shape
        print "target size:", target_training.shape
        add_pca_layer(self.network, features_training, self.PCA)
        self.network.add(BatchNormalization())
        for i in range(it):
            # Extract the features used to train the gllim layer.
            features_training, target_training = extract_XY_generator(
                self.network, generator_training, n_train)
            self.gllim.fit(target_training, features_training,
                           MAX_ITER_EM, (i == 0), None)
            # Introduced to evaluate the test set at every iteration:
            # self.gllim.evaluate((gen_test, N_test), WIDTH)
            # Invert the gllim layer to perform forward evaluation.
            self.gllim.inversion()
            print "VALIDATION SET:"
            self.evaluate((generator_val, n_val), WIDTH)
            print "TEST SET:"
            self.evaluate((generator_test, n_test), WIDTH)
            # Perform the M-network step.
            self.fine_tune(16, learning_rate, train_txt)
        # Finish with a gllim update.
        features_training, target_training = extract_XY_generator(
            self.network, generator_training, n_train)
        self.gllim.fit(target_training, features_training,
                       MAX_ITER_EM, False, None)
        self.gllim.inversion()
        print "--- %s seconds for training Deep Gllim ---" % (
            time.time() - start_time_training)
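# Hedged sketch of what a helper like add_pca_layer() can amount to in Keras:
# project the extracted features onto their leading principal components with
# a frozen Dense layer whose weights are precomputed. This is an illustrative
# reconstruction under assumptions, not the helper used in fit() above.
import numpy as np
from keras.layers import Dense

def pca_dense_layer_sketch(features, n_components):
    mu = features.mean(axis=0)
    _, _, Vt = np.linalg.svd(features - mu, full_matrices=False)
    W = Vt[:n_components].T           # D x n_components projection
    b = -mu.dot(W)                    # fold the centering into the bias
    return Dense(n_components, weights=[W, b], trainable=False)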
        for y in Ypred - Y:
            file.write(np.array_str(y, max_line_width=1000000) + "\n")


if __name__ == '__main__':
    l2_Model = L2Model()
    # Optional data-augmentation transforms (currently disabled):
    # t = [lambda x: random_rotation(x, 2.0, row_index=2, col_index=3, channel_index=1),
    #      lambda x: random_shift(x, 0.03, 0.03, row_index=2, col_index=3, channel_index=1),
    #      lambda x: random_zoom(x, 0.05, row_index=2, col_index=3, channel_index=1)]
    # t = [lambda x: random_rotation(x, 2.0, row_index=1, col_index=2, channel_index=0),
    #      lambda x: random_shift(x, 0.03, 0.03, row_index=1, col_index=2, channel_index=0),
    #      lambda x: random_zoom(x, [0.95, 1.05], row_index=1, col_index=2, channel_index=0)]
    (gen_training, N_train), (gen_val, N_val), (gen_test, N_test) = \
        load_data_generator(ROOTPATH, train_txt, test_txt,
                            validation=0.8, subsampling=ssRatio,
                            batch_size=BATCH_SIZE)
    l2_Model.fit((gen_training, N_train), (gen_val, N_val))
    l2_Model.evaluate((gen_training, N_train), "training", 224)
    l2_Model.evaluate((gen_val, N_val), "validation", 224)
    l2_Model.evaluate((gen_test, N_test), "test", 224)
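# Hedged sketch: reading back the per-sample error rows written above in
# np.array_str format and reducing them to a mean absolute error per target
# dimension. The path argument is illustrative; pass whatever file the
# evaluate() call wrote.
import numpy as np

def mean_abs_error_from_file(path):
    rows = []
    with open(path) as fh:
        for line in fh:
            # np.array_str rows look like "[ 0.123 -0.456]"; strip brackets.
            values = line.strip().lstrip('[').rstrip(']').split()
            rows.append([float(v) for v in values])
    return np.abs(np.array(rows)).mean(axis=0)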
        '''
        features_test, target_test = extract_XY_generator(
            self.network, generator, n_eval)
        gllim_predict = self.gllim.predict_high_low(features_test)
        run_eval(gllim_predict, target_test, l, pbFlag, printError=False)


if __name__ == '__main__':
    deep_gllim = DeepGllim(k=GLLIM_K, PCA=FEATURES_SIZE)
    train_txt = sys.argv[2]
    test_txt = sys.argv[3]
    deep_gllim.fit(train_txt, test_txt,
                   learning_rate=LEARNING_RATE, it=ITER)
    (generator_training, n_train), (generator_val, n_val), \
        (gen_test, N_test) = load_data_generator(
            ROOTPATH, train_txt, test_txt, validation=Valperc)
    deep_gllim.evaluate((gen_test, N_test), WIDTH,
                        pbFlag=PB_FLAG, printError=True)