def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config( "C:\\Users\\Sava\\Documents\\ESRGAN\\configs\\config.json") except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = session_initialiser() # create your data generator data = il.ImageLoader(config) # create an instance of the model you want model = ESRGAN(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = ESRGANTrainer(sess, model, data, config, logger) # load model if exists model.load(sess) # here you train your model trainer.train()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = tf.Session() # create an instance of the model you want model = MyModel(config) # create your data generator data = DataGenerator(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = SimpleTrainer(sess, model, data, config, logger) # here you train your model trainer.train() trainer.validate()
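# Every entry point above and below calls get_args(), which is never defined in
# this file. A minimal sketch, assuming the common argparse pattern from this
# style of project template (the -c/--config flag is an assumption, not shown
# in the source):

import argparse

def get_args():
    # assumed helper: parse the config-file path used by every main() here
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        '-c', '--config',
        metavar='C',
        default='None',
        help='The configuration file')
    return argparser.parse_args()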
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) print('Create the data generator.') data_generator = DataGenerator(config) print('Create the model.') model = ComplexConvModel(config, data_generator.get_word_index()) print('Create the trainer') trainer = MultiLabelConvModelTrainer(model.model, data_generator.get_train_data(), config) print('Start training the model.') trainer.train() print('Visualize the losses') trainer.visualize()
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    args = get_args()
    config = process_config(args.config)

    if hasattr(config, "comet_api_key"):
        # comet_ml must be imported before the ML framework for auto-logging
        from comet_ml import Experiment

    # create the experiments dirs
    create_dirs([
        config.callbacks.tensorboard_log_dir,
        config.callbacks.checkpoint_dir,
        config.preprocessor.data_dir
    ])

    print('Creating the preprocessor.')
    preprocessor = factory.create("preprocessors." + config.preprocessor.name)(config)
    preprocessor.preprocess()

    print('Create the data generator.')
    data_loader = factory.create("data_loaders." + config.data_loader.name)(config)

    print('Create the model.')
    model = factory.create("models." + config.model.name)(config)

    print('Create the trainer')
    trainer = factory.create("trainers." + config.trainer.name)(
        model.model, data_loader.get_train_data(), config)

    print('Start training the model.')
    trainer.train()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = tf.Session() # create an instance of the model you want model = MyModel(config) # create your data generator data = DataGenerator(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = SimpleTrainer(sess, model, data, config, logger) saverExternal = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='external')) saverExternal.restore(sess, "experiments/model_8/model8.ckpt_2") # here you train your model trainer.train() trainer.validate()
def black_box_function(learning_rate):
    args = get_args()
    config = process_config_UtsClassification_bayes_optimization(args.config, learning_rate)

    create_dirs([
        config.callbacks.tensorboard_log_dir,
        config.callbacks.checkpoint_dir,
        config.log_dir,
        config.result_dir
    ])

    print('Create the data generator.')
    data_loader = UtsClassificationDataLoader(config)

    print('Create the model.')
    model = UtsClassificationModel(config, data_loader.get_inputshape(),
                                   data_loader.get_nbclasses())

    print('Create the trainer')
    trainer = UtsClassificationTrainer(model.model, data_loader.get_train_data(), config)

    print('Start training the model.')
    trainer.train()

    print('Create the evaluater.')
    evaluater = UtsClassificationEvaluater(trainer.best_model,
                                           data_loader.get_test_data(),
                                           data_loader.get_nbclasses(),
                                           config)

    print('Start evaluating the model.')
    evaluater.evluate()
    print('done')

    return evaluater.f1
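# The objective above follows the bayes_opt "black_box_function" convention
# (take hyperparameters, return the metric to maximize). A minimal driver
# sketch, assuming the bayes_opt package; the learning-rate search range is
# an assumption, not taken from the source:

from bayes_opt import BayesianOptimization

optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={'learning_rate': (1e-4, 1e-1)},  # assumed search range
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=10)
print(optimizer.max)  # best f1 and the learning rate that produced it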
def run_multi():
    # Get the arguments
    args = get_args()
    config, _ = get_config_from_json(args.config)

    values_xx = config.exp.vals_0
    values_zz = config.exp.vals_0
    params = config.exp.params
    section = config.exp.section

    # Spectral Normalization
    for i in values_xx:
        # Mode
        for j in values_zz:
            config[section][params[0]] = i
            config[section][params[1]] = j
            config.exp.name = args.experiment + "_{}_{}".format(i, j)
            process_config(config)
            create_dirs([
                config.log.summary_dir,
                config.log.checkpoint_dir,
                config.log.step_generation_dir,
                config.log.log_file_dir,
                config.log.codebase_dir,
            ])
            # Copy the model code and the trainer code to the experiment folder
            run(config, args)
            tf.reset_default_graph()
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

        # create the experiments dirs
        create_dirs([
            config.callbacks.tensorboard_log_dir,
            config.callbacks.checkpoint_dir
        ])

        print('Create the data generator.')
        data_loader = factory.create("data_loader." + config.data_loader.name)(config)

        print('Create the model.')
        model = factory.create("models." + config.model.name)(config)

        print('Create the trainer')
        trainer = factory.create("trainers." + config.trainer.name)(
            model.model, data_loader.get_train_data(), config)

        print('Start training the model.')
        trainer.train()
    except Exception as e:
        print(e)
        sys.exit(1)
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([ config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir ]) print('Create the data generator.') data_loader = Stl10DataLoader(config) print('Create the model.') model = MobilenetModel(config) print('Create the trainer') trainer = GeneratorModelTrainer(model.model, data_loader.train_generator, data_loader.test_generator, config) print('Start training the model.') trainer.train()
def main(): # capture the config path from the run arguments # then process the json configuration file config = process_config("configs/interval_neural_net.json") # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = tf.Session() # create your data generator data = DataGenerator(config, ConstWidth()) # create an instance of the model you want model = ExampleModel_deeper(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = ExampleTrainer(sess, model, data, config, logger) # load model if exists model.load(sess) # here you train your model t = time.time() trainer.train() elapsed = time.time() - t print("".join(["Elapsed time: ", str(elapsed)])) plotmaker = PlotMaker(trainer, model, data) plotmaker.write_all_plots("figures/experiment1_incertitude_deeper") plotmaker.write_results_json("figures/experiment1_incertitude_deeper")
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create an instance of the model you want
    model = ExampleModel(config)
    # load model if it exists
    model.load(sess)
    # create your data generator
    data = DataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # here you train your model
    trainer.train()
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([
        config.summary_dir, config.checkpoint_dir, config.dic_dir,
        config.result_dir
    ])

    # create tensorflow session
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    gpu_config = tf.ConfigProto()
    gpu_config.allow_soft_placement = True
    gpu_config.gpu_options.allow_growth = True
    sess = tf.Session(config=gpu_config)

    # create your data generator
    data = DialogueDataGenerator(config)
    # create an instance of the model you want,
    # the tensorboard logger, and the trainer
    model = SCNRMAModel(config, data)
    logger = Logger(sess, config)
    trainer = SCNRMATrainer(sess, model, data, config, logger)
    # here you test your model
    trainer.do_test()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = tf.Session() # create your data generator data = DataGenerator(config) # use mnist dataset data.load_mnist() # create an instance of the cnn model model = CnnMnistModel(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = CnnMnistTrainer(sess, model, data, config, logger) #load model if exists model.load(sess) # here you train your model trainer.train()
def main(configfile):
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        config = process_config(configfile)
    except Exception as e:
        print("missing or invalid arguments {}".format(e))
        exit(0)

    config.device = "cuda" if torch.cuda.is_available() else "cpu"
    torch.manual_seed(100)
    np.random.seed(100)

    print("lr = {0}".format(config.hyperparams.learning_rate))
    print("decay = {0}".format(config.hyperparams.decay_rate))
    print(config.architecture)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    doc_utils.doc_used_config(config)

    for exp in range(1, config.num_exp + 1):
        print("Experiment num = {}\n".format(exp))
        # create a data generator
        data = DataGenerator(config)
        # create an instance of the model you want
        model_wrapper = ModelWrapper(config, data)
        # create trainer and pass all the previous components to it
        trainer = Trainer(model_wrapper, data, config)
        # here you train your model
        trainer.train()
        # here you test your model
        trainer.test()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) # create tensorflow session sess = tf.Session() # create an instance of the model you want model = MyModel(config) # create your data generator data = DataGenerator(config) # create tensorboard logger logger = Logger(sess, config) # create trainer and pass all the previous components to it trainer = SimpleTrainer(sess, model, data, config, logger) saverExternal = tf.train.Saver(var_list=tf.get_collection( tf.GraphKeys.GLOBAL_VARIABLES, scope='external')) saverExternal.restore(sess, "experiments/model_8/model8.ckpt_2") # here you train your model trainer.train() trainer.validate()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir]) print('Create the data generator.') data_loader = Stl10LogitsLoader(config) print('Create the model.') #model = XceptionModel(config) #model.load("datasets/model_data/xception.h5") model = load_model("datasets/model_data/xception.h5") model.layers.pop() model = Model(model.input, model.layers[-1].output) print('Get Logits.') batches = 0 train_logits = {} for x_batch, y_batch, name_batch in tqdm(data_loader.train_generator): batch_logits = model.predict_on_batch(x_batch) for i, n in enumerate(name_batch): #print(n) train_logits[n] = softmax(batch_logits[i]) batches += 1 if batches >= 5000//50: # 5000/64 break np.save('train_logits.npy', train_logits) print(train_logits[0]) batches = 0 test_logits = {} numb = 0 for x_batch, _, name_batch in tqdm(data_loader.test_generator ): batch_logits = model.predict_on_batch(x_batch) for i, n in enumerate(name_batch): test_logits[n] = softmax(batch_logits[i]) batches += 1 if batches >= 3000//50: # 3000/64 break np.save('test_logits.npy', test_logits) print(test_logits[0])
def main():
    os.environ['CUDA_VISIBLE_DEVICES'] = '2'
    try:
        # fetch the next hyperparameter set from NNI and merge it with defaults
        RECEIVED_PARAMS = nni.get_next_parameter()
        LOG.debug(RECEIVED_PARAMS)
        PARAMS = generate_default_params()
        PARAMS.update(RECEIVED_PARAMS)

        args = get_args()
        config = process_config_UtsClassification_bayes_optimization(args.config, PARAMS)

        # create the experiments dirs
        create_dirs([
            config.callbacks.tensorboard_log_dir,
            config.callbacks.checkpoint_dir, config.log_dir,
            config.result_dir
        ])

        print('Create the data generator.')
        data_loader = UtsClassificationDataLoader(config)

        print('Create the model.')
        model = UtsClassificationModel(config, data_loader.get_inputshape(),
                                       data_loader.get_nbclasses())

        print('Create the trainer')
        trainer = UtsClassificationTrainer(model.model,
                                           data_loader.get_train_data(),
                                           config)

        print('Start training the model.')
        trainer.train()

        print('Create the evaluater.')
        evaluater = UtsClassificationEvaluater(trainer.best_model,
                                               data_loader.get_test_data(),
                                               data_loader.get_nbclasses(),
                                               config)

        print('Start evaluating the model.')
        evaluater.evluate()

        # report the final metric back to the NNI tuner
        nni.report_final_result(evaluater.f1)
        print('done')
    except Exception as e:
        LOG.exception(e)
        raise
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([ config.callbacks.tensorboard_log_dir, config.callbacks.checkpoint_dir ]) print('Create the data generator.') data_loader = SoluteDataLoader(config) # iterate over hyperparamters, and log each run for comparison. add hyper parmeters to model function lr = [] reg = [] val_acc = [] val_loss = [] for k in range(config.param_optim.num_of_itr_random): print('Create the model.') model = SoluteModelOptim(config) print('Create the trainer') trainer = SoluteModelTrainerOptim(model.model, data_loader.get_train_data(), config) print('Start training the model.') trainer.train() l1_ks, l1_d, l2_ks, l2_d, dense_num_neuron, learning_rate, reg_l2 = model.get_model_params( ) loss, acc, val_loss, val_acc = trainer.get_train_log() param_list = [ l1_ks, l1_d, l2_ks, l2_d, dense_num_neuron, learning_rate, reg_l2 ] metric_list = [loss, acc, val_loss, val_acc] metric_list = zip(*metric_list) csvfile = 'experiments/log_{0:.2}.csv'.format(val_acc[-1]) with open(csvfile, "w") as output: writer = csv.writer(output, lineterminator='\n') writer.writerow([ 'l1_ks', 'l1_d', 'l2_ks', 'l2_d', 'dense_num_neuron', 'learning_rate', 'reg_l2' ]) writer.writerows([param_list]) writer.writerow(['loss', 'accuracy', 'val_loss', 'val_accuracy']) writer.writerows(metric_list) print('itr {0}'.format(k))
def main():
    args = get_args()

    # pick the model variant to run (alternatives kept for reference)
    #model_name = 'nodepert_ae5_sgd_correctgeom'
    model_name = 'nodepert_ae5_faauto'
    #model_name = 'nodepert_ae5_bpauto'
    #Model = AENPModel5
    #Model = AENPModel5_ExactLsq
    #Model = AENPModel5_ExactLsq_BPAuto
    #Model = AENPModel5_ExactLsq_BPSelf
    Model = AENPModel5_ExactLsq_FAAuto
    #Model = AENPModel5_ExactLsq_FASelf
    Data = MNISTDataGenerator
    Trainer = AESFTrainer

    config = process_config('./configs/np_optimized.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    #var_vals = [1e-2]
    N = 1
    #M = 5
    M = 10
    T = config.num_epochs + 1
    n_tags = 13
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    for n in range(N):
        tf.reset_default_graph()
        model = Model(config)
        data = Data(config)
        for m in range(M):
            with tf.Session() as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :, :] = metric
            # save after each run
            fn = os.path.join(config.summary_dir) + "3_autoencoder_correctbatch.npz"
            to_save = {
                'test_losses': test_losses,
                'metrics': metrics,
                'isnan': isnan,
                'tags': tags
            }
            pickle.dump(to_save, open(fn, "wb"))
    return metrics
def main():
    # capture the config file path
    # run: python tsic.py -c configs/tsic_who_config.json
    #      python tsic.py -c configs/tsic_ed_config.json
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    create_dirs([])

    for Fusion in S:
        print('Create the data generator.')
        train_dataset = MRIData(config, train=True)
        test_dataset = MRIData(config, train=False)
        # printData(train_dataset, type='normal')
        # printData(test_dataset, type='normal')

        # get the train and test sets as arrays
        train_data, train_label = train_dataset.getData(train=True, array=True)
        test_data, test_label = test_dataset.getData(train=False, array=True)
        print(type(train_data), type(train_label), train_data.shape, train_label.shape)

        # slice out sequences E, F, G, H, I, J
        print(train_data[:, :, :, 3:9].shape)
        train_data = train_data[:, :, :, 3:9]  # 36*16*16*6
        test_data = test_data[:, :, :, 3:9]

        # reorder dimensions: sample - time series - width - height
        train_data = train_data.transpose((0, 3, 1, 2))
        test_data = test_data.transpose((0, 3, 1, 2))
        train_data_shape = train_data.shape
        test_data_shape = test_data.shape
        print(train_data_shape, test_data_shape)

        # visualize the images
        view_img(train_data, (16, 16, 3), (6, 6), str(Fusion))

        # flatten width and height into one dimension
        train_data = train_data.reshape(train_data_shape[0], train_data_shape[1], -1)
        test_data = test_data.reshape(test_data_shape[0], test_data_shape[1], -1)
        print(train_data.shape, test_data.shape)

        # compute the mean grey value of each sequence
        train_mean, test_mean = train_data.mean(2), test_data.mean(2)

        # concatenate the train and test sets
        data_mean = np.concatenate((train_mean, test_mean), axis=0)
        label = np.concatenate((train_label, test_label), axis=0)

        # plot the time-intensity curves
        plot_tsic(data_mean, label, classes=config.classes_name)
def main():
    args = get_args()
    model_name = 'nodepert4_fixedw_exact'
    Model = NPModel4_ExactLsq
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/np.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # Parameter-search settings
    attr = ['var_xi']
    #var_vals = [1e-3, 1e-2, 1e-1, 1, 10]
    var_vals = [1e-1]

    N = len(var_vals)
    M = 1
    T = config.num_epochs + 1
    n_tags = 8
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    for n in range(N):
        var_val = [var_vals[n]]
        set_hyperparameters(config, attr, var_val)
        model = Model(config)
        data = Data(config)
        print('Hyperparameters: ' + attr[0] + ' = %f' % var_vals[n])
        for m in range(M):
            with tf.Session() as sess:
                logger = LoggerNumpy(sess, config, model)
                model.load(sess)
                trainer = Trainer(sess, model, data, config, logger)
                try:
                    trainer.train()
                except ValueError:
                    print("Method fails to converge for these parameters")
                    isnan[n, m] = 1
                loss, acc = trainer.test()
                metric = logger.get_data()
                tags = logger.get_tags()
                test_losses[n, m] = loss
                metrics[n, m, :] = metric

    fn = os.path.join(config.summary_dir) + "2_establish_convergence_feedforward_output.npz"
    to_save = {
        'test_losses': test_losses,
        'metrics': metrics,
        'isnan': isnan,
        'tags': tags
    }
    pickle.dump(to_save, open(fn, "wb"))
    #np.savez(fn, test_losses=test_losses, metrics=metrics, isnan=isnan, tags=tags)
    return metrics
def main():
    args = get_args()

    # Set random seeds
    np.random.seed(args.seed)
    tf.set_random_seed(args.seed)

    # config is of type Munch
    config = process_config(args.config)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    sconfig = tf.ConfigProto()
    sconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=sconfig)

    with tf.variable_scope("model"):
        model = SPEN(config)

    if config.data.embeddings is True:
        # This is outside the data generator since it's used to explicitly init the TF model
        embeddings = load_embeddings(config)
        logger.info("embeddings loaded :- %d items", len(embeddings))
    else:
        embeddings = None

    if config.data.vocab is True:
        # Load the two vocabulary files for types and entities
        types, types_vocab, entities, entitites_vocab = load_vocab(config)
        logger.info("vocab loaded :- %d types, %d entities", len(types), len(entities))

    generator = eval("data_generator.%s" % config.data.data_generator)
    dsplits = config.data.splits
    train_data = generator(config, split=dsplits[0])
    logger.info("training set loaded :- %d instances", train_data.len)
    dev_data = generator(config, split=dsplits[1])
    logger.info("dev set loaded :- %d instances", dev_data.len)
    test_data = generator(config, split=dsplits[2])
    logger.info("test set loaded :- %d instances", test_data.len)

    # create tensorboard logger
    tf_logger = TFLogger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = SpenTrainer(sess, model, [train_data, dev_data, test_data],
                          embeddings, config, tf_logger)

    # Inference net pre-training
    trainer.train(stage=0)
    # This is needed to keep a copy of the pre-trained infnet
    trainer.copy_infnet()
    # Energy network minimization
    trainer.train(stage=1)
    # Inference network post-training
    trainer.train(stage=2)
def main():
    args = get_args()
    model_name = 'feedbackalignment4_small'
    Model = FAModel4_Small
    #Model = FAModel4
    Data = MNISTDataGenerator
    Trainer = SFTrainer

    config = process_config('./configs/sf.json', model_name)
    create_dirs([config.summary_dir, config.checkpoint_dir])

    N = 1
    M = 5
    T = config.num_epochs + 1
    n_tags = 10
    test_losses = np.zeros((N, M))
    isnan = np.zeros((N, M))
    metrics = np.zeros((N, M, T, n_tags))

    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True

    n = 0
    model = Model(config)
    data = Data(config)
    for m in range(M):
        with tf.Session(config=tfconfig) as sess:
            logger = LoggerNumpy(sess, config, model)
            model.load(sess)
            trainer = Trainer(sess, model, data, config, logger)
            try:
                trainer.train()
            except ValueError:
                print("Method fails to converge for these parameters")
                isnan[n, m] = 1
            loss, acc = trainer.test()
            metric = logger.get_data()
            tags = logger.get_tags()
            test_losses[n, m] = loss
            metrics[n, m, :, :] = metric
        # save after each run
        fn = os.path.join(config.summary_dir) + \
            "2b_establish_convergence_feedforward_feedbackalignment_output.npz"
        to_save = {
            'test_losses': test_losses,
            'metrics': metrics,
            'isnan': isnan,
            'tags': tags
        }
        pickle.dump(to_save, open(fn, "wb"))
    return metrics
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([ os.path.join('/scratch/users/thomaslj/autogen_files', config.model.bash_name, config.callbacks.tensorboard_log_dir), os.path.join('/scratch/users/thomaslj/autogen_files', config.model.bash_name, config.callbacks.checkpoint_dir) ]) print('Create the data generator.') logger('Creating data generators ...'.format(datetime.now())) data_loader = { 'train': factory.create("data_loader." + config.data_loader.name)( config, subset='train', shuffle=True), 'test': factory.create("data_loader." + config.data_loader.name)( config, subset='test'), 'eval': factory.create("data_loader." + config.data_loader.name)(config, subset='eval') } print('Create the model.') model = TemporalClustering(config) logger('Creating the trainer ...'.format(datetime.now())) if config.model.num_gpus > 1: trainer = factory.create("trainers." + config.trainer.name)( model.parallel_model, data_loader, config) else: trainer = factory.create("trainers." + config.trainer.name)( model.model, data_loader, config) print('Start training the model.') trainer.train() print('predicting the autoencoder') trainer.predict_autoencoder() print('Computing the clustering layer.') data_loader['train'].model_type = data_loader[ 'test'].model_type = data_loader['eval'].model_type = 'clustering' trainer.data_groups['train_data'].model_type = trainer.data_groups['test_data'].model_type = \ trainer.data_groups['eval_data'].model_type = trainer.eval_data.model_type = trainer.test_data.model_type = \ trainer.test_data.model_type = 'clustering' clust_layer = trainer.clustering_layer()
def main(): # capture the config path from the run arguments # then process the json configuration file # args = get_args() config = process_config("../configs/train_config.json") # create the experiments dirs create_dirs([config.summary_dir, config.checkpoint_dir]) """
def get_tensorboard_logger(board_path: str, log_freq: int,
                           create_dirs_flag: bool) -> tf.keras.callbacks.Callback:
    if create_dirs_flag:
        create_dirs([board_path])
    return tf.keras.callbacks.TensorBoard(log_dir=board_path,
                                          update_freq='epoch',
                                          histogram_freq=log_freq,
                                          write_images=True,
                                          embeddings_freq=log_freq)
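# A minimal usage sketch for the helper above; `model`, `x_train`, `y_train`,
# and the log path are assumptions for illustration, not taken from the source.

callbacks = [get_tensorboard_logger('experiments/logs', log_freq=1, create_dirs_flag=True)]
# model is any compiled tf.keras model; x_train/y_train are its training arrays
model.fit(x_train, y_train, epochs=10, callbacks=callbacks)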
def saveWordPairs(self, folder):
    create_dirs([folder])
    for filename in self.allPairsByFile:
        lang1, lang2 = [lang for lang in self.allPairsByFile[filename]]
        filepath = folder + os.path.basename(filename) + ".txt"
        filepath = filepath.replace(".txt.txt", ".txt")
        with open(filepath, "w") as f:
            for i in range(self.file2length[filename]):
                token1 = self.allPairsByFile[filename][lang1][i]
                token2 = self.allPairsByFile[filename][lang2][i]
                f.write("{} {}\n".format(token1, token2))
def process_config(json_file):
    config, _ = get_config_from_json(json_file)
    config.summary_dir = os.path.join("./experiments", config.exp_name, "summary/")
    config.checkpoint_dir = os.path.join("./experiments", config.exp_name, "checkpoint/")
    config.final_model_dir = os.path.join("./experiments", config.exp_name, "saved_model/")
    config.images_dir = os.path.join("./experiments", config.exp_name, "images/")
    create_dirs([config.summary_dir, config.checkpoint_dir, config.images_dir])
    return config
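# get_config_from_json() is called by both process_config variants but never
# shown here. A minimal sketch, assuming the usual json + attribute-dict
# pattern; the DotMap choice is an assumption (EasyDict or Bunch work the same way).

import json
from dotmap import DotMap  # assumed dependency

def get_config_from_json(json_file):
    # parse the config file into an attribute-accessible namespace
    with open(json_file, 'r') as config_file:
        config_dict = json.load(config_file)
    config = DotMap(config_dict)
    return config, config_dict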
def process_config(json_file, create_folders=True):
    """
    Read the json file,
    process it with EasyDict so it is accessible as attributes,
    then set the paths of the experiments folder,
    creating some important directories in that folder.
    Finally set up logging for the whole program and return the config.
    :param json_file: the path of the config file
    :return: config object (namespace)
    """
    config, _ = get_config_from_json(json_file)
    print(" THE Configuration of your experiment ..")
    pprint(config)

    # making sure that you have provided the exp_name.
    try:
        print(" *************************************** ")
        print("The experiment name is {}".format(config.exp_name))
        print(" *************************************** ")
    except AttributeError:
        print("ERROR!!..Please provide the exp_name in json file..")
        exit(-1)

    epoch_time = int(time.time())
    print("create folders", create_folders)
    if create_folders:
        folder_name = os.path.join("experiments", f"{config.exp_name}_{epoch_time}")
    else:
        folder_name = "/tmp/last-experiment"

    # create some important directories to be used for that experiment.
    config.summary_dir = os.path.join(folder_name, "summaries/")
    config.checkpoint_dir = os.path.join(folder_name, "checkpoints/")
    config.out_dir = os.path.join(folder_name, "out/")
    config.log_dir = os.path.join(folder_name, "logs/")
    print(config.summary_dir)
    create_dirs([
        config.summary_dir, config.checkpoint_dir, config.out_dir,
        config.log_dir
    ])

    # setup logging in the project
    setup_logging(config.log_dir)

    config.dry_run = "DRY_RUN" in environ and environ["DRY_RUN"] == "1"
    if config.dry_run:
        config.pct_data = 0.01
    config.use_valid = "USE_VALID" in environ
    return config
def test_create_dirs(self):
    import os
    from utils.dirs import create_dirs

    # a single path should work
    dirs = os.path.join(os.path.abspath('.'), 'tests/test_dir')
    create_dirs(dirs)
    self.assertTrue(os.path.exists(dirs))

    # a list of paths should work too
    dirs = [
        os.path.join(os.path.abspath('.'), 'tests/test1_dir'),
        os.path.join(os.path.abspath('.'), 'tests/test2_dir')
    ]
    create_dirs(dirs)
    for dir_ in dirs:
        self.assertTrue(os.path.exists(dir_))
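# create_dirs() itself never appears in this file. A minimal sketch consistent
# with the test above: accepting either a single path or a list is inferred
# from that test; the exact error handling is an assumption.

import os

def create_dirs(dirs):
    """Create each directory in `dirs` if it does not already exist.

    :param dirs: a single path or a list of paths
    """
    if isinstance(dirs, str):
        dirs = [dirs]
    try:
        for dir_ in dirs:
            if not os.path.exists(dir_):
                os.makedirs(dir_)
    except Exception as err:
        print("Creating directories error: {0}".format(err))
        exit(-1)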
def eval(config):
    # build data_obj
    data_obj = config.Data(config)
    setattr(config, "data_obj", data_obj)
    setattr(config, "mode", 'valid')

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create an instance of the model you want
    model = config.Model(config)
    # create the evaluator and pass all the previous components to it
    evaler = config.Evaler(model, config, gpu_config)
    # here you evaluate your model
    evaler.eval()
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    args = get_args()
    config, config_induce, config_cla = process_config(args.config)

    # set visible devices
    os.environ["CUDA_VISIBLE_DEVICES"] = ", ".join(args.gpu_name)

    # fix random seeds
    tf.set_random_seed(1)
    np.random.seed(1)
    tf.logging.set_verbosity(0)

    create_dirs([config.summary_dir, config.img_dir])

    # define external summary writer for classification
    cla_summary_writer = tf.summary.FileWriter(config.summary_dir)

    # run the induce/classify loop
    step, flag = 0, True
    while step < config_induce.num_training_steps:
        if flag:
            num_steps, flag = 0, False
        else:
            num_steps = min(config.num_steps_per_iter,
                            config_induce.num_training_steps - step)

        print("Build and train inducer...")
        tf.reset_default_graph()
        x_pseudo, y_pseudo = build_and_train_inducer(num_steps, config_induce)
        step += num_steps
        plot_images_and_labels(x_pseudo, y_pseudo, 'pseudo_' + str(step), config.img_dir)

        print("Build and train classifier...")
        tf.reset_default_graph()
        loss, error = build_and_train_classifier(config_cla.num_training_steps, config_cla)

        cla_loss_summary = tf.Summary()
        cla_error_summary = tf.Summary()
        cla_loss_summary.value.add(tag="test/loss", simple_value=loss)
        cla_error_summary.value.add(tag="test/error", simple_value=error)
        cla_summary_writer.add_summary(cla_loss_summary, step)
        cla_summary_writer.add_summary(cla_error_summary, step)
        cla_summary_writer.flush()
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.tensorboard_log_dir, config.checkpoint_dir, "val_test"]) print('Create the data generator.') if hasattr(config, "data_set"): if config.data_set == "face_data_77": data_loader = FaceLandmark77DataLoader(config) else: data_loader = SimpleMnistDataLoader(config) else: data_loader = SimpleMnistDataLoader(config) print('Create the model.') if hasattr(config, "model_name"): if config.model_name == "mobile_net": model = MobileNetV2Model(config) else: model = SimpleMnistModel(config) else: model = SimpleMnistModel(config) print(model.model.input_names) print([out.op.name for out in model.model.outputs]) return if hasattr(config, "best_checkpoint"): model.load(config.best_checkpoint) frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in model.model.outputs]) ckpt_path = Path(config.best_checkpoint) tf.train.write_graph(frozen_graph, str(ckpt_path.parent), ckpt_path.with_suffix(".pb").name, as_text=False)
def main(): # capture the config path from the run arguments # then process the json configuration file try: args = get_args() config = process_config(args.config) except: print("missing or invalid arguments") exit(0) # create the experiments dirs create_dirs([config.tensorboard_log_dir, config.checkpoint_dir]) print('Create the data generator.') if hasattr(config, "data_set"): if config.data_set == "face_data_77": data_loader = FaceLandmark77DataLoader(config) else: data_loader = SimpleMnistDataLoader(config) else: data_loader = SimpleMnistDataLoader(config) print('Create the model.') if hasattr(config, "model_name"): if config.model_name == "mobile_net": model = MobileNetV2Model(config) else: model = SimpleMnistModel(config) else: model = SimpleMnistModel(config) print('Create the trainer') trainer = SimpleMnistModelTrainer(model.model, data_loader, config) print('Start training the model.') trainer.train()