def train_vqvae(params, dataset, checkpoint_dir, recover=True):
    logger.set_logger_dir(checkpoint_dir)

    dataset_params = params['dataset']
    model_params = params['model']
    trainer_params = params['trainer']
    image_shape = model_params['image_shape']

    train_ds, val_ds, sample_train, sample_test = load_toy_dataset(
        dataset, trainer_params['batch_size'], trainer_params['num_parallel'])

    params.to_file(os.path.join(logger.get_logger_dir(), 'config.json'))

    model = BaseVQVAE.from_params(model_params)

    trainer_config = AutoResumeTrainConfig(
        always_resume=recover,
        model=model,
        dataflow=train_ds,
        callbacks=[
            Reconstruct(model, sample_train, sample_test,
                        os.path.join(checkpoint_dir, 'images')),
            ModelSaver(max_to_keep=5, checkpoint_dir=checkpoint_dir),
            InferenceRunner(input=val_ds,
                            infs=ScalarStats(['loss', 'perplexity'])),
            MinSaver(monitor_stat='validation_loss'),
            CompressResource(os.path.join(checkpoint_dir, 'images'),
                             os.path.join(checkpoint_dir, 'images.zip'))
        ],
        steps_per_epoch=trainer_params['steps_per_epoch'],
        max_epoch=trainer_params['max_epochs']
    )
    launch_train_with_config(trainer_config, SimpleTrainer())
def get_config(model):
    nr_tower = max(get_num_gpu(), 1)
    batch = args.batch // nr_tower
    logger.info("Running on {} towers. Batch size per tower: {}".format(nr_tower, batch))

    callbacks = [ThroughputTracker(args.batch)]

    if args.fake:
        data = QueueInput(FakeData(
            [[batch, 224, 224, 3], [batch]], 1000, random=False, dtype='uint8'))
    else:
        data = QueueInput(
            get_imagenet_dataflow(args.data, 'train', batch),
            # use a larger queue
            queue=tf.FIFOQueue(200, [tf.uint8, tf.int32],
                               [[batch, 224, 224, 3], [batch]])
        )

    BASE_LR = 30
    SCALED_LR = BASE_LR * (args.batch / 256.0)
    callbacks.extend([
        ModelSaver(),
        EstimatedTimeLeft(),
        ScheduledHyperParamSetter(
            'learning_rate', [
                (0, SCALED_LR),
                (60, SCALED_LR * 1e-1),
                (70, SCALED_LR * 1e-2),
                (80, SCALED_LR * 1e-3),
                (90, SCALED_LR * 1e-4),
            ]),
    ])

    dataset_val = get_imagenet_dataflow(args.data, 'val', 64)
    infs = [ClassificationError('wrong-top1', 'val-error-top1'),
            ClassificationError('wrong-top5', 'val-error-top5')]
    if nr_tower == 1:
        callbacks.append(InferenceRunner(QueueInput(dataset_val), infs))
    else:
        callbacks.append(DataParallelInferenceRunner(
            dataset_val, infs, list(range(nr_tower))))

    if args.load.endswith(".npz"):
        # a released model in npz format
        init = SmartInit(args.load)
    else:
        # a pre-trained checkpoint
        init = SaverRestore(args.load, ignore=("learning_rate", "global_step"))

    return TrainConfig(
        model=model,
        data=data,
        callbacks=callbacks,
        steps_per_epoch=100 if args.fake else 1281167 // args.batch,
        session_init=init,
        max_epoch=100,
    )
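The schedule above follows the linear-scaling rule: a base learning rate of 30 is multiplied by args.batch / 256, then decayed by 10x at epochs 60, 70, 80, and 90. A minimal sketch of the resulting values, assuming a hypothetical total batch size of 512 (illustrative only, not taken from the code above):

total_batch = 512                            # hypothetical stand-in for args.batch
BASE_LR = 30
SCALED_LR = BASE_LR * (total_batch / 256.0)  # 30 * 2.0 = 60.0

# Same decay points as the ScheduledHyperParamSetter above:
# epoch 0 -> 60.0, 60 -> 6.0, 70 -> 0.6, 80 -> 0.06, 90 -> 0.006
schedule = [(0, SCALED_LR),
            (60, SCALED_LR * 1e-1),
            (70, SCALED_LR * 1e-2),
            (80, SCALED_LR * 1e-3),
            (90, SCALED_LR * 1e-4)]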
def train_pixelcnn_prior(params, checkpoint_dir, recover=True, force=False):
    if force and os.path.exists(checkpoint_dir):
        shutil.rmtree(checkpoint_dir)
    logger.set_logger_dir(checkpoint_dir)

    dataset_params = params['dataset']
    model_params = params['model']
    trainer_params = params['trainer']

    train_ds, val_ds, sample_train, sample_val, sample_train_label, \
        sample_val_label = get_dataflow(
            dataset_params['path'], False, dataset_params['train_val_split'],
            trainer_params['batch_size'], trainer_params['num_parallel'])

    vqvae_checkpoint_path = trainer_params['vqvae_checkpoint_path']
    vqvae_config_path = os.path.join(os.path.split(vqvae_checkpoint_path)[0],
                                     'config.json')
    model_params['vqvae_model_params'] = vqvae_config_path

    latent_shape = model_params['latent_shape']
    num_labels = model_params['num_labels']

    params.to_file(os.path.join(logger.get_logger_dir(), 'config.json'))

    model = BasePixelCNNPrior.from_params(model_params)

    trainer = SyncMultiGPUTrainerParameterServer(
        gpus=trainer_params['num_gpus'], ps_device=None)
    trainer_config = AutoResumeTrainConfig(
        always_resume=recover,
        model=model,
        dataflow=train_ds,
        callbacks=[
            SequentialSampling(trainer_params['num_examples_to_generate'],
                               latent_shape, num_labels, model,
                               os.path.join(checkpoint_dir, 'images')),
            Reconstruct(model, sample_train, sample_val,
                        os.path.join(checkpoint_dir, 'images'),
                        sample_train_label, sample_val_label),
            ModelSaver(max_to_keep=5, checkpoint_dir=checkpoint_dir),
            InferenceRunner(input=val_ds, infs=ScalarStats(['loss'])),
            MinSaver(monitor_stat='validation_loss'),
            CompressResource(os.path.join(checkpoint_dir, 'images'),
                             os.path.join(checkpoint_dir, 'images.zip')),
            RestoreWeights(vqvae_checkpoint_path),
            Notification('Training status', 'Complete')
        ],
        steps_per_epoch=trainer_params['steps_per_epoch'],
        max_epoch=trainer_params['max_epochs']
    )
    launch_train_with_config(trainer_config, trainer)
def run_once(self, opt, sess_init=None, save_dir=None):
    ####
    train_datagen = self.get_datagen(opt['train_batch_size'], mode='train')
    valid_datagen = self.get_datagen(opt['infer_batch_size'], mode='valid')

    ###### must be called before ModelSaver
    if save_dir is None:
        logger.set_logger_dir(self.save_dir)
    else:
        logger.set_logger_dir(save_dir)

    ######
    model_flags = opt['model_flags']
    model = self.get_model()(**model_flags)

    ######
    callbacks = [
        ModelSaver(max_to_keep=1, keep_checkpoint_every_n_hours=None),
    ]
    for param_name, param_info in opt['manual_parameters'].items():
        model.add_manual_variable(param_name, param_info[0])
        callbacks.append(
            ScheduledHyperParamSetter(param_name, param_info[1]))

    # multi-GPU inference (with mandatory queue prefetch)
    infs = [StatCollector()]
    callbacks.append(
        DataParallelInferenceRunner(valid_datagen, infs, list(range(nr_gpus))))

    if self.model_mode == 'seg_gland':
        callbacks.append(MaxSaver('valid_dice_obj'))
    elif self.model_mode == 'seg_nuc':
        callbacks.append(MaxSaver('valid_dice_np'))
    else:
        callbacks.append(MaxSaver('valid_auc'))

    ######
    steps_per_epoch = train_datagen.size() // nr_gpus

    config = TrainConfig(
        model=model,
        callbacks=callbacks,
        dataflow=train_datagen,
        steps_per_epoch=steps_per_epoch,
        max_epoch=opt['nr_epochs'],
    )
    config.session_init = sess_init
    launch_train_with_config(config,
                             SyncMultiGPUTrainerParameterServer(nr_gpus))
    tf.reset_default_graph()  # remove the entire graph in case of multiple runs
    return
def get_config(model):
    input_sig = model.get_input_signature()
    nr_tower = max(hvd.size(), 1)
    batch = args.batch // nr_tower
    logger.info("Running on {} towers. Batch size per tower: {}".format(
        nr_tower, batch))

    callbacks = [ThroughputTracker(args.batch), UpdateMomentumEncoder()]

    if args.fake:
        data = QueueInput(
            FakeData([x.shape for x in input_sig], 1000,
                     random=False, dtype='uint8'))
    else:
        zmq_addr = 'ipc://@imagenet-train-b{}'.format(batch)
        data = ZMQInput(zmq_addr, 25, bind=False)

        dataset = data.to_dataset(input_sig).repeat().prefetch(15)
        dataset = dataset.apply(
            tf.data.experimental.prefetch_to_device('/gpu:0'))
        data = TFDatasetInput(dataset)

    callbacks.extend([
        ModelSaver(),
        EstimatedTimeLeft(),
    ])

    if not args.v2:  # step-wise LR in v1
        SCALED_LR = BASE_LR * (args.batch / 256.0)
        callbacks.append(
            ScheduledHyperParamSetter('learning_rate',
                                      [(0, min(BASE_LR, SCALED_LR)),
                                       (120, SCALED_LR * 1e-1),
                                       (160, SCALED_LR * 1e-2)]))
        if SCALED_LR > BASE_LR:
            callbacks.append(
                ScheduledHyperParamSetter('learning_rate',
                                          [(0, BASE_LR), (5, SCALED_LR)],
                                          interp='linear'))

    return TrainConfig(
        model=model,
        data=data,
        callbacks=callbacks,
        steps_per_epoch=100 if args.fake else 1281167 // args.batch,
        max_epoch=200,
    )
def train_image_embedding_softmax(params, checkpoint_dir, recover=True,
                                  force=False):
    if force and os.path.exists(checkpoint_dir):
        shutil.rmtree(checkpoint_dir)
    logger.set_logger_dir(checkpoint_dir)

    dataset_params = params['dataset']
    model_params = params['model']
    trainer_params = params['trainer']

    train_ds, val_ds, _, _, _, _ = get_dataflow(
        dataset_params['path'], False, dataset_params['train_val_split'],
        trainer_params['batch_size'], trainer_params['num_parallel'])

    vqvae_checkpoint_path = trainer_params['vqvae_checkpoint_path']
    vqvae_config_path = os.path.join(os.path.split(vqvae_checkpoint_path)[0],
                                     'config.json')
    model_params['vqvae_model_params'] = vqvae_config_path

    params.to_file(os.path.join(logger.get_logger_dir(), 'config.json'))

    model = BaseImageEmbedding.from_params(model_params)

    trainer = SyncMultiGPUTrainerParameterServer(
        gpus=trainer_params['num_gpus'], ps_device=None)
    trainer_config = AutoResumeTrainConfig(
        always_resume=recover,
        model=model,
        dataflow=train_ds,
        callbacks=[
            InferenceRunner(input=val_ds, infs=[
                ScalarStats('loss'),
                ClassificationError('correct_prediction',
                                    'val-correct_prediction')]),
            ModelSaver(max_to_keep=5, checkpoint_dir=checkpoint_dir),
            MinSaver(monitor_stat='val-correct_prediction'),
            RestoreWeights(vqvae_checkpoint_path),
            SendStat('Training status',
                     ['loss', 'accuracy',
                      'validation_loss', 'val-correct_prediction'],
                     after_every=2),
            Notification('Training status', 'Complete')
        ],
        steps_per_epoch=trainer_params['steps_per_epoch'],
        max_epoch=trainer_params['max_epochs']
    )
    launch_train_with_config(trainer_config, trainer)
def run_once(self, opt, sess_init=None, save_dir=None):
    ####
    train_datagen = self.get_datagen(opt["train_batch_size"], mode="train")
    valid_datagen = self.get_datagen(opt["infer_batch_size"], mode="valid")

    ###### must be called before ModelSaver
    if save_dir is None:
        logger.set_logger_dir(self.save_dir)
    else:
        logger.set_logger_dir(save_dir)

    ######
    model_flags = opt["model_flags"]
    model = self.get_model()(**model_flags)

    ######
    callbacks = [
        # ModelSaver(max_to_keep=20), # TODO dynamic this
        ModelSaver(max_to_keep=opt["nr_epochs"]),
        # InjectShell(file='/tools/hover_net/src/config.yml', shell='ipython'),
    ]
    for param_name, param_info in opt["manual_parameters"].items():
        model.add_manual_variable(param_name, param_info[0])
        callbacks.append(ScheduledHyperParamSetter(param_name, param_info[1]))

    # multi-GPU inference (with mandatory queue prefetch)
    infs = [StatCollector()]
    callbacks.append(
        DataParallelInferenceRunner(valid_datagen, infs, list(range(nr_gpus)))
    )
    callbacks.append(MaxSaver("valid_dice"))

    ######
    steps_per_epoch = train_datagen.size() // nr_gpus

    config = TrainConfig(
        model=model,
        callbacks=callbacks,
        dataflow=train_datagen,
        steps_per_epoch=steps_per_epoch,
        max_epoch=opt["nr_epochs"],
    )
    config.session_init = sess_init
    launch_train_with_config(config, SyncMultiGPUTrainerParameterServer(nr_gpus))
    tf.reset_default_graph()  # remove the entire graph in case of multiple runs
    # TODO: save
    return
def train_vae(params, checkpoint_dir, recover=True, force=False):
    if force and os.path.exists(checkpoint_dir):
        shutil.rmtree(checkpoint_dir)
    logger.set_logger_dir(checkpoint_dir)

    dataset_params = params['dataset']
    model_params = params['model']
    trainer_params = params['trainer']

    train_ds, val_ds, sample_train, sample_val, _, _ = \
        get_dataflow(dataset_params['path'], dataset_params['binarizer'],
                     dataset_params['train_val_split'],
                     trainer_params['batch_size'],
                     trainer_params['num_parallel'])

    params.to_file(os.path.join(logger.get_logger_dir(), 'config.json'))

    latent_dim = model_params['latent_dim']
    model = BaseVAE.from_params(model_params)

    trainer = SyncMultiGPUTrainerParameterServer(
        gpus=trainer_params['num_gpus'], ps_device=None)
    trainer_config = AutoResumeTrainConfig(
        always_resume=recover,
        model=model,
        dataflow=train_ds,
        callbacks=[
            Sampling(model, trainer_params['num_examples_to_generate'],
                     latent_dim, os.path.join(checkpoint_dir, 'images')),
            Reconstruct(model, sample_train, sample_val,
                        os.path.join(checkpoint_dir, 'images')),
            ModelSaver(max_to_keep=5, checkpoint_dir=checkpoint_dir),
            InferenceRunner(input=val_ds,
                            infs=ScalarStats(['avg_logpx_z', 'neg_elbo'])),
            MinSaver(monitor_stat='validation_neg_elbo'),
            CompressResource(os.path.join(checkpoint_dir, 'images'),
                             os.path.join(checkpoint_dir, 'images.zip')),
            Notification('Training status', 'Complete')
        ],
        steps_per_epoch=trainer_params['steps_per_epoch'],
        max_epoch=trainer_params['max_epochs']
    )
    launch_train_with_config(trainer_config, trainer)
def _default_callbacks(self):
    self.callbacks = [
        ModelSaver(max_to_keep=self.args.max_to_keep),
        EstimatedTimeLeft(),
    ]
    if self.args.gpu and self.args.gpu != "-1":
        self.callbacks.append(GPUUtilizationTracker())
    if self.args.validation is not None:
        self.callbacks.append(
            InferenceRunner(self.dataflow(True),
                            [ScalarStats(self.total_cost_var)]))
    self.callbacks.append(
        MinSaver(self.validation_total_cost_var
                 if self.args.validation is not None
                 else self.total_cost_var))
    self._network_specific_callbacks()
def run_once(self, nr_gpus, freeze, sess_init=None, save_dir=None):
    ####
    train_datagen = self.get_datagen(mode='train')
    valid_datagen = self.get_datagen(mode='valid')

    ###### must be called before ModelSaver
    if save_dir is None:
        logger.set_logger_dir(self.save_dir)
    else:
        logger.set_logger_dir(save_dir)

    callbacks = [
        ModelSaver(max_to_keep=200),
        ScheduledHyperParamSetter('learning_rate', self.lr_sched),
    ]

    ######
    # multi-GPU inference (with mandatory queue prefetch)
    infs = [StatCollector()]
    callbacks.append(
        DataParallelInferenceRunner(valid_datagen, infs, list(range(nr_gpus))))

    ######
    steps_per_epoch = train_datagen.size() // nr_gpus

    MODEL_MAKER = Model_NP_XY if self.model_mode == 'np+xy' else Model_NP_DIST

    config = TrainConfig(
        model=MODEL_MAKER(freeze),
        callbacks=callbacks,
        dataflow=train_datagen,
        steps_per_epoch=steps_per_epoch,
        max_epoch=self.nr_epochs,
    )
    config.session_init = sess_init
    launch_train_with_config(config,
                             SyncMultiGPUTrainerParameterServer(nr_gpus))
    tf.reset_default_graph()  # remove the entire graph in case of multiple runs
    return
def train_image_embedding_triplet(params, checkpoint_dir, recover=True,
                                  force=False):
    if force and os.path.exists(checkpoint_dir):
        shutil.rmtree(checkpoint_dir)
    logger.set_logger_dir(checkpoint_dir)

    dataset_params = params['dataset']
    model_params = params['model']
    trainer_params = params['trainer']

    train_ds = get_triplet_dataflow(
        dataset_params['path'], trainer_params['items_per_batch'],
        trainer_params['images_per_item'], trainer_params['num_parallel'])

    vqvae_checkpoint_path = trainer_params['vqvae_checkpoint_path']
    vqvae_config_path = os.path.join(os.path.split(vqvae_checkpoint_path)[0],
                                     'config.json')
    model_params['vqvae_model_params'] = vqvae_config_path

    params.to_file(os.path.join(logger.get_logger_dir(), 'config.json'))

    model = BaseImageEmbedding.from_params(model_params)

    trainer = SyncMultiGPUTrainerParameterServer(
        gpus=trainer_params['num_gpus'], ps_device=None)
    trainer_config = AutoResumeTrainConfig(
        always_resume=recover,
        model=model,
        dataflow=train_ds,
        callbacks=[
            ModelSaver(max_to_keep=5, checkpoint_dir=checkpoint_dir),
            MinSaver(monitor_stat='loss'),
            RestoreWeights(vqvae_checkpoint_path),
            SendStat('Training status', ['loss', 'pos_triplet_frac'],
                     after_every=2),
            Notification('Training status', 'Complete')
        ],
        steps_per_epoch=trainer_params['steps_per_epoch'],
        max_epoch=trainer_params['max_epochs']
    )
    launch_train_with_config(trainer_config, trainer)
    M.add(KL.Conv2D(32, 3, padding='same', activation='relu'))
    M.add(KL.Flatten())
    M.add(
        KL.Dense(512, activation='relu',
                 kernel_regularizer=keras.regularizers.l2(1e-5)))
    M.add(KL.Dropout(0.5))
    M.add(
        KL.Dense(10, activation=None,
                 kernel_regularizer=keras.regularizers.l2(1e-5)))
    M.add(KL.Activation('softmax'))
    return M


dataset_train, dataset_test = get_data()

M = KerasModel(
    model_func,
    inputs_desc=[
        InputDesc(tf.float32, [None, IMAGE_SIZE, IMAGE_SIZE, 1], 'images')
    ],
    targets_desc=[InputDesc(tf.float32, [None, 10], 'labels')],
    input=QueueInput(dataset_train))
M.compile(
    optimizer=tf.train.AdamOptimizer(1e-3),
    loss='categorical_crossentropy',
    metrics='categorical_accuracy')
M.fit(
    validation_data=dataset_test,
    steps_per_epoch=dataset_train.size(),
    callbacks=[ModelSaver()])
M.add(KL.Conv2D(32, 3, activation='relu', padding='same'))
M.add(KL.MaxPooling2D())
M.add(KL.Conv2D(32, 3, padding='same', activation='relu'))
M.add(KL.Flatten())
M.add(
    KL.Dense(512, activation='relu',
             kernel_regularizer=regularizers.l2(1e-5)))
M.add(KL.Dropout(0.5))
M.add(
    KL.Dense(10, activation=None,
             kernel_regularizer=regularizers.l2(1e-5)))
M.add(KL.Activation('softmax'))

trainer = SimpleTrainer()
setup_keras_trainer(
    trainer,
    model=M,
    input=QueueInput(dataset_train),
    optimizer=tf.train.AdamOptimizer(1e-3),
    loss='categorical_crossentropy',
    metrics=['accuracy'])
trainer.train_with_defaults(
    callbacks=[
        ModelSaver(),
        InferenceRunner(dataset_test,
                        [ScalarStats(['total_loss', 'accuracy'])]),
    ],
    steps_per_epoch=dataset_train.size(),
)
def train_child(model_cls, args, log_dir, child_dir, prev_dir):
    """Train one child model in `child_dir`, restoring weights from an
    existing checkpoint in `child_dir` or `prev_dir` when available.
    """
    if not os.path.exists(child_dir):
        os.mkdir(child_dir)

    if os.path.basename(child_dir) == "0" and args.use_init_model:
        init_model_dir = os.path.join(args.data_dir, 'init_model', args.ds_name)
        if os.path.exists(init_model_dir):
            # This implies that there exists init_model_dir, and we are in the first model,
            # so we do not need to train. Copy the model and mark finished.
            logger.info("Skip first model as this model is fully trained.")
            cmd = "mkdir -p {cdir} ; cp {pdir}/* {cdir}/ ".format(
                cdir=child_dir, pdir=args.init_model_dir)
            _ = subprocess.check_output(cmd, shell=True)
            return

    # get training params for train-config
    (model, args, starting_epoch, lr_schedule,
     ds_train, insrc_train, train_cbs) = get_training_params(model_cls, args)

    ## Model callbacks
    # loss weight update
    ls_cbs_func = getattr(model, 'compute_loss_select_callbacks', None)
    if callable(ls_cbs_func):
        train_cbs.extend(ls_cbs_func())

    # extra callbacks for general logging/update
    extra_callbacks = DEFAULT_CALLBACKS()
    if not args.do_remote_child_inf_runner:
        extra_callbacks = \
            [ecb for ecb in extra_callbacks if not isinstance(ecb, ProgressBar)]
    logger.info("Extra callbacks are {}".format(
        [ecb.__class__ for ecb in extra_callbacks]))

    # Logging for analysis
    model_str = model.net_info.to_str()
    logger.info('LayerInfoListString is :\n {}'.format(model_str))

    train_callbacks = [
        ModelSaver(checkpoint_dir=child_dir,
                   max_to_keep=1,
                   keep_checkpoint_every_n_hours=100),
    ] + train_cbs
    if lr_schedule:
        train_callbacks.append(
            ScheduledHyperParamSetter('learning_rate', lr_schedule))

    logger.info('The updated params for training is \n{}'.format(args))
    config = TrainConfig(
        data=insrc_train,
        dataflow=ds_train,
        callbacks=train_callbacks,
        extra_callbacks=extra_callbacks,
        model=model,
        monitors=[JSONWriter(), ScalarPrinter()],  #, TFEventWriter()],
        steps_per_epoch=args.steps_per_epoch,
        max_epoch=args.max_epoch,
        starting_epoch=starting_epoch)

    for dn in [child_dir, prev_dir]:
        if dn is None:
            continue
        ckpt = tf.train.latest_checkpoint(dn)
        if ckpt:
            if args.search_cat_based:
                restore_cls = SaverRestoreSizeRelaxed
            else:
                restore_cls = SaverRestore
            _ignore = [DYNAMIC_WEIGHTS_NAME]
            _sess_init_load = restore_cls(ckpt, ignore=_ignore)
            if dn == child_dir:
                # loading from self: keep global step
                config.session_init = _sess_init_load
            else:
                # loading from others: set global_step to 0
                config.session_init = ChainInit([
                    _sess_init_load,
                    AssignGlobalStep(0),
                ])
            break

    launch_train_with_config(config,
                             SyncMultiGPUTrainerParameterServer(args.nr_gpu))
    return model
def train(args, cfg):
    out_dirs = gen_outdirs(args, "tp")
    output_dir, out_res_dir = out_dirs["output_dir"], out_dirs["out_res_dir"]
    df = PneuSegDF(args.mode, out_res_dir, args.train_dir, args.testset_dir,
                   args.min_num_workers, cfg)
    num_gpu = max(get_num_gpu(), 1)
    ds = df.prepared(num_gpu, cfg.batch_size)

    # Avoid overwriting the config file
    if os.path.exists(pj(output_dir, os.path.basename(args.config))):
        input("Config file will NOT be overwritten. Press Enter to continue...")
    else:
        shutil.copy(args.config, output_dir)
    logger.set_logger_dir(pj(output_dir, "log"))

    callback_list = [
        # PeriodicCallback overrides the frequency of the callback it wraps
        PeriodicCallback(ModelSaver(50, checkpoint_dir=output_dir),
                         every_k_epochs=1),
        GPUUtilizationTracker(),
        MergeAllSummaries(1 if args.train_debug else 0),
        # ProgressBar(["Loss"])
    ]
    if cfg.network["norm_layer"] == "BN_layers":
        callback_list.append(BN_layers_update())
    if cfg.lr_schedule["type"] == "epoch_wise_constant":
        schedule = [(ep, lr / num_gpu) for ep, lr in zip(
            [0] + cfg.lr_schedule["epoch_to_drop_lr"], cfg.lr_schedule["lr"])]
        callback_list.append(
            ScheduledHyperParamSetter("learning_rate", schedule))
    elif cfg.lr_schedule["type"] == "halved":
        schedule = [(0, cfg.lr_schedule["init_lr"])]
        for i in range(cfg.lr_schedule["first_epoch2drop"], cfg.max_epoch,
                       cfg.lr_schedule["period"]):
            schedule.append(
                (i, schedule[int((i - cfg.lr_schedule["first_epoch2drop"]) /
                                 cfg.lr_schedule["period"])][1] /
                 (cfg.lr_schedule["decay_rate"] * num_gpu)))
        print(schedule)
        callback_list.append(
            ScheduledHyperParamSetter("learning_rate", schedule))

    steps_per_epoch = len(ds) // num_gpu + 1
    train_cfg = TrainConfig(
        model=Tensorpack_model(cfg, steps_per_epoch),
        data=QueueInput(ds),
        steps_per_epoch=steps_per_epoch,
        callbacks=callback_list,
        monitors=[
            # ScalarPrinter(True, whitelist=["Loss", "LR"]),
            ScalarPrinter(True),
            # ScalarPrinter(),
            TFEventWriter(),
            # JSONWriter()
        ],
        max_epoch=cfg.max_epoch,
        session_init=SmartInit(args.resume),
        starting_epoch=args.resume_epoch)
    launch_train_with_config(
        train_cfg,
        SyncMultiGPUTrainerReplicated(num_gpu) if num_gpu > 1 else SimpleTrainer())
def critic_train(ctrl, data, log_dir, model_dir, prev_dir, vs_name,
                 split_train_val=False):
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    lr_schedule = []
    max_epoch = ctrl.critic_train_epoch
    lr = ctrl.critic_init_lr
    for epoch in range(0, max_epoch):
        if epoch % 1 == 0:
            lr_schedule.append((epoch + 1, lr))
            lr *= 0.9

    ds_size = len(data[0])
    idxs = list(range(ds_size))
    np.random.shuffle(idxs)
    if split_train_val:
        train_size = ds_size * 9 // 10
        if train_size == 0:
            train_size = ds_size
        val_start = train_size
    else:
        train_size = ds_size
        val_start = ds_size * 9 // 10
        if ds_size - val_start == 0:
            val_start = 0
    data_train = [[col[k] for k in idxs[:train_size]] for col in data]
    data_val = [[col[k] for k in idxs[val_start:]] for col in data]

    model = critic_factory(ctrl, is_train=True, vs_name=vs_name)
    ds_train = critic_dataflow_factory(ctrl, data_train, is_train=True)
    ds_val = critic_dataflow_factory(ctrl, data_val, is_train=False)

    session_config = None
    device = 0
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
        device = -1

    extra_callbacks = DEFAULT_CALLBACKS()
    extra_callbacks = list(
        filter(lambda x: not isinstance(x, ProgressBar), extra_callbacks))
    logger.info("Extra callbacks are {}".format(
        list(map(lambda x: x.__class__, extra_callbacks))))

    # Put this into callbacks for in-training validation/inferencing
    inference_callback = InferenceRunner(
        ds_val, [ScalarStats('{}/cost'.format(vs_name))], device=device)

    config = TrainConfig(
        dataflow=ds_train,
        callbacks=[
            ModelSaver(checkpoint_dir=model_dir,
                       max_to_keep=1,
                       keep_checkpoint_every_n_hours=100),
            ScheduledHyperParamSetter('learning_rate', lr_schedule)
        ],
        extra_callbacks=extra_callbacks,
        model=model,
        monitors=[JSONWriter(), ScalarPrinter()],  #, TFEventWriter()],
        steps_per_epoch=ds_train.size(),
        max_epoch=max_epoch,
        session_config=session_config)

    ckpt = tf.train.latest_checkpoint(prev_dir if prev_dir else model_dir)
    if ckpt:
        config.session_init = SaverRestore(ckpt)
    launch_train_with_config(config, SimpleTrainer())
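Every snippet above reduces to the same Tensorpack skeleton: build a ModelDesc, wrap a DataFlow in an input source, attach callbacks, and hand a TrainConfig to launch_train_with_config. A minimal sketch of that shared pattern, assuming a hypothetical MyModel (a ModelDesc subclass) and hypothetical my_dataflow / my_val_dataflow DataFlows; none of these names come from the snippets above:

from tensorpack import (TrainConfig, SimpleTrainer, QueueInput,
                        launch_train_with_config)
from tensorpack.callbacks import (ModelSaver, InferenceRunner,
                                  ScalarStats, MinSaver)
from tensorpack.utils import logger

logger.set_logger_dir('/tmp/train_log')      # must be set before ModelSaver

config = TrainConfig(
    model=MyModel(),                         # hypothetical ModelDesc subclass
    data=QueueInput(my_dataflow),            # hypothetical training DataFlow
    callbacks=[
        ModelSaver(max_to_keep=5),           # periodic checkpoints
        InferenceRunner(my_val_dataflow,     # hypothetical validation DataFlow
                        ScalarStats(['loss'])),
        MinSaver('validation_loss'),         # keep the best checkpoint
    ],
    steps_per_epoch=1000,
    max_epoch=50,
)
launch_train_with_config(config, SimpleTrainer())

Swapping SimpleTrainer for SyncMultiGPUTrainerParameterServer or SyncMultiGPUTrainerReplicated, as several snippets above do, changes only the second argument of launch_train_with_config.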