def run():
    data_seed = 0
    date = datetime.now()
    n_labeled = 4000

    result_dir = "{root}/{dataset}/{model}/{date:%Y-%m-%d_%H:%M:%S}/{seed}".format(
        root='results/final_eval',
        dataset='cifar10_{}'.format(n_labeled),
        model='mean_teacher',
        date=date,
        seed=data_seed)

    model = Model(result_dir=result_dir)
    model['flip_horizontally'] = True
    model['max_consistency_coefficient'] = 100.0 * n_labeled / 50000
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)
    model.train(training_batches, evaluation_batches_fn)

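# NOTE: the run() functions in this section come from separate experiment
# scripts and share a common header that was not captured. A minimal sketch of
# the imports they rely on; the module paths are assumptions for illustration,
# not confirmed by the snippets themselves.

import logging
from datetime import datetime

import tensorflow as tf

from datasets import SVHN, Cifar10ZCA            # assumed dataset loaders
from mean_teacher.model import Model             # assumed location of Model
from mean_teacher.run_context import RunContext  # assumed location of RunContext
from mean_teacher import minibatching

LOG = logging.getLogger('main')
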
def run(data_seed, ema_decay_during_rampup, ema_decay_after_rampup,
        test_phase=False, n_labeled=250, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['ema_decay_during_rampup'] = ema_decay_during_rampup
    model['ema_decay_after_rampup'] = ema_decay_after_rampup

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

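# Several scripts below call model_hyperparameters(), which is defined in the
# surrounding script rather than shown here. A hypothetical minimal sketch,
# inferred only from the keys the callers read; the concrete values are
# placeholders, not the originals. (The CIFAR variants that take a result_dir
# read 'max_consistency_coefficient' instead of 'max_consistency_cost'.)

def model_hyperparameters(model_type, n_labeled, n_extra_unlabeled=0):
    assert model_type in ['mean_teacher', 'pi']
    return {
        'ema_consistency': model_type == 'mean_teacher',  # Pi model: no EMA teacher
        'max_consistency_cost': 100.0,                    # placeholder value
        'apply_consistency_to_labeled': True,             # placeholder value
        'training_length': 180000,                        # placeholder value
        'n_labeled_per_batch': 1,                         # placeholder value
    }
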
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    # Renamed from the misleading 'cifar': this script trains on SVHN.
    svhn = SVHN(n_labeled=n_labeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(data_seed, num_logits, logit_distance_cost,
        test_phase=False, n_labeled=500, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['num_logits'] = num_logits
    model['logit_distance_cost'] = logit_distance_cost

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(result_dir, test_phase, n_labeled, data_seed, model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled)

    tf.reset_default_graph()
    model = Model(result_dir=result_dir)

    cifar = Cifar10ZCA(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    model['flip_horizontally'] = True
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_coefficient'] = hyperparams['max_consistency_coefficient']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    training_batches = minibatching.training_batches(
        cifar.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        cifar.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(result_dir, test_phase, n_labeled, n_extra_unlabeled, data_seed, model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(result_dir=result_dir)

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['rampdown_length'] = 0
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_coefficient'] = hyperparams['max_consistency_coefficient']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run():
    data_seed = 0
    date = datetime.now()
    n_labeled = 500
    n_extra_unlabeled = 0

    result_dir = "{root}/{dataset}/{model}/{date:%Y-%m-%d_%H:%M:%S}/{seed}".format(
        root='results/final_eval',
        dataset='svhn_{}_{}'.format(n_labeled, n_extra_unlabeled),
        model='mean_teacher',
        date=date,
        seed=data_seed)

    model = Model(result_dir=result_dir)
    model['rampdown_length'] = 0
    model['training_length'] = 180000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training, n_labeled_per_batch=1)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation)
    model.train(training_batches, evaluation_batches_fn)

def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    cifar = Cifar10ZCA(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    model['flip_horizontally'] = True
    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    training_batches = minibatching.training_batches(
        cifar.training, minibatch_size, n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        cifar.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(test_phase, n_labeled, n_extra_unlabeled, data_seed, model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']

    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(data_seed, dropout, input_noise, augmentation,
        test_phase=False, n_labeled=250, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['student_dropout_probability'] = dropout
    model['teacher_dropout_probability'] = dropout
    model['input_noise'] = input_noise
    model['translate'] = augmentation

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)

def run(data_seed=0):
    n_labeled = 1000
    n_extra_unlabeled = 0

    model = Model(RunContext(__file__, 0))

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training, n_labeled_per_batch=1)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation)
    model.train(training_batches, evaluation_batches_fn)

def run(data_seed=0):
    n_labeled = 500
    n_extra_unlabeled = 0

    model = Model(RunContext(__file__, 0))
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation)
    model.train(training_batches, evaluation_batches_fn)

def run(data_seed=0):
    n_labeled = 4000

    model = Model(RunContext(__file__, 0))
    model['flip_horizontally'] = True
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)
    model.train(training_batches, evaluation_batches_fn)

def run():
    data_seed = 0
    n_labeled = 4000

    model = Model(RunContext(__file__, 0))
    model['flip_horizontally'] = True
    model['max_consistency_cost'] = 100.0 * n_labeled / 50000
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)
    model.train(training_batches, evaluation_batches_fn)

def run(test_phase, data_seed, model_type):
    minibatch_size = 100

    # fixed and pre-calculated (from file)
    n_labeled = 127737
    n_all = 630346 + 127737

    hyperparams = model_hyperparameters(model_type, n_labeled, n_all)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    train_filename = '/root/storage/hdd/eyes_color/descriptions_files/train_labeled_unlabeled_1st_stage.txt'
    test_filename = '/root/storage/hdd/eyes_color/descriptions_files/test_base_path.txt'
    eye_dataset = Eye24(imgs_dir='/root/storage/hdd/',
                        train_filename=train_filename,
                        test_filename=test_filename)

    model['flip_horizontally'] = True
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    # TODO not sure
    model['rampdown_length'] = 35000
    model['training_length'] = 250000

    training_batches = minibatching.training_batches_transform(
        eye_dataset.training, train_pipeline, minibatch_size)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator_transform(
        eye_dataset.evaluation, eval_pipeline, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
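
# Usage sketch: each run() above is the entry point of one experiment script.
# A hypothetical driver for the last script; the real runner wiring is not
# shown in these snippets.

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    for seed in range(3):  # repeat the experiment over a few data seeds
        run(test_phase=False, data_seed=seed, model_type='mean_teacher')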