Code Example #1
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    cifar = Cifar10ZCA(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    model['flip_horizontally'] = True
    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    training_batches = minibatching.training_batches(cifar.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
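
Note: every snippet in this listing omits its module-level imports. A minimal preamble that would make most of them runnable, assuming the module layout of the mean-teacher TensorFlow implementation these examples are drawn from, is sketched below; the exact import paths and the logger name are assumptions, not shown in the source.

import logging
import os

import tensorflow as tf  # TF 1.x API: tf.reset_default_graph() does not exist in TF 2.x

from datasets import Cifar10ZCA, SVHN            # assumed dataset wrappers
from experiments.run_context import RunContext   # assumed run-bookkeeping helper
from mean_teacher import minibatching            # assumed batch generators
from mean_teacher.model import Model             # assumed Mean Teacher model class

LOG = logging.getLogger('main')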
Code Example #2
def run(data_seed, ema_decay_during_rampup, ema_decay_after_rampup,
        test_phase=False, n_labeled=250, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['ema_decay_during_rampup'] = ema_decay_during_rampup
    model['ema_decay_after_rampup'] = ema_decay_after_rampup

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
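
An entry point like this one would typically be driven from a parameter sweep; a hypothetical direct call, with illustrative decay values not taken from the source:

run(data_seed=0, ema_decay_during_rampup=0.99, ema_decay_after_rampup=0.999)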
Code Example #3
def run(test_phase, n_labeled, n_extra_unlabeled, data_seed, model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']

    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #4
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    cifar = Cifar10ZCA(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    model['flip_horizontally'] = True
    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    training_batches = minibatching.training_batches(cifar.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        cifar.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #5
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #6
def run(data_seed, dropout, input_noise, augmentation,
        test_phase=False, n_labeled=250, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['student_dropout_probability'] = dropout
    model['teacher_dropout_probability'] = dropout
    model['input_noise'] = input_noise
    model['translate'] = augmentation

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #7
def run(data_seed, num_logits, logit_distance_cost,
        test_phase=False, n_labeled=500, n_extra_unlabeled=0, model_type='mean_teacher'):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled, n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_cost'] = hyperparams['max_consistency_cost']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']
    model['num_logits'] = num_logits
    model['logit_distance_cost'] = logit_distance_cost

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #8
def run(result_dir, test_phase, n_labeled, data_seed, model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled)

    tf.reset_default_graph()
    model = Model(result_dir=result_dir)

    cifar = Cifar10ZCA(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    model['flip_horizontally'] = True
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_coefficient'] = hyperparams['max_consistency_coefficient']
    model['apply_consistency_to_labeled'] = hyperparams['apply_consistency_to_labeled']
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    training_batches = minibatching.training_batches(cifar.training,
                                                     minibatch_size,
                                                     hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation,
                                                                    minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #9
def run(test_phase, data_seed, n_labeled, training_length, rampdown_length):
    minibatch_size = 100
    n_labeled_per_batch = 100

    tf.reset_default_graph()
    model = Model(RunContext(__file__, data_seed))

    svhn = SVHN(n_labeled=n_labeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['ema_consistency'] = True
    model['max_consistency_cost'] = 0.0
    model['apply_consistency_to_labeled'] = False
    model['rampdown_length'] = rampdown_length
    model['training_length'] = training_length

    # Turn off augmentation
    model['translate'] = False
    model['flip_horizontally'] = False

    training_batches = minibatching.training_batches(svhn.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #10
def run(result_dir, test_phase, n_labeled, n_extra_unlabeled, data_seed,
        model_type):
    minibatch_size = 100
    hyperparams = model_hyperparameters(model_type, n_labeled,
                                        n_extra_unlabeled)

    tf.reset_default_graph()
    model = Model(result_dir=result_dir)

    svhn = SVHN(n_labeled=n_labeled,
                n_extra_unlabeled=n_extra_unlabeled,
                data_seed=data_seed,
                test_phase=test_phase)

    model['rampdown_length'] = 0
    model['ema_consistency'] = hyperparams['ema_consistency']
    model['max_consistency_coefficient'] = hyperparams[
        'max_consistency_coefficient']
    model['apply_consistency_to_labeled'] = hyperparams[
        'apply_consistency_to_labeled']
    model['training_length'] = hyperparams['training_length']

    training_batches = minibatching.training_batches(
        svhn.training, minibatch_size, hyperparams['n_labeled_per_batch'])
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation, minibatch_size)

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    model.train(training_batches, evaluation_batches_fn)
Code Example #11
def run():
    data_seed = 0
    date = datetime.now()
    n_labeled = 4000

    result_dir = "{root}/{dataset}/{model}/{date:%Y-%m-%d_%H:%M:%S}/{seed}".format(
        root='results/final_eval',
        dataset='cifar10_{}'.format(n_labeled),
        model='mean_teacher',
        date=date,
        seed=data_seed
    )

    model = Model(result_dir=result_dir)
    model['flip_horizontally'] = True
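    # 50000 is presumably the CIFAR-10 training-set size; with n_labeled = 4000 this evaluates to 8.0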
    model['max_consistency_coefficient'] = 100.0 * n_labeled / 50000
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #12
File: train_svhn.py  Project: vertix/mean-teacher
def run():
    data_seed = 0
    date = datetime.now()
    n_labeled = 500
    n_extra_unlabeled = 0

    result_dir = "{root}/{dataset}/{model}/{date:%Y-%m-%d_%H:%M:%S}/{seed}".format(
        root='results/final_eval',
        dataset='svhn_{}_{}'.format(n_labeled, n_extra_unlabeled),
        model='mean_teacher',
        date=date,
        seed=data_seed)

    model = Model(result_dir=result_dir)
    model['rampdown_length'] = 0
    model['training_length'] = 180000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training,
                                                     n_labeled_per_batch=1)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #13
def run(test_phase, n_labeled, data_seed, data_type, bg_noise, ict=False):

    if ict:
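        # Use the ICT (Interpolation Consistency Training) variant of the trainer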
        from mean_teacher.mean_teacher_ict_final import mean_teacher
    else:
        from mean_teacher.mean_teacher import mean_teacher

    minibatch_size = 100

    data = data_loader(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase,
                       bg_noise=bg_noise,
                       urban_noise=False)

    print('{} is loaded with {} training samples'.format(
        datasets_name[FLAGS.dataset_index], data['num_train']))

    if ict:
        n_labeled_per_batch = int(minibatch_size / 2)
        max_consistency_cost = n_labeled_per_batch
    elif n_labeled == 'all':
        n_labeled_per_batch = minibatch_size
        max_consistency_cost = minibatch_size
    else:
        n_labeled_per_batch = 'vary'
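        # Scale the consistency weight by the labeled fraction of the training set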
        max_consistency_cost = minibatch_size * int(
            n_labeled) / data['num_train']

    hyper_dict = {
        'input_dim': data['input_dim'],
        'label_dim': data['label_dim'],
        'cnn': 'audio',
        'flip_horizontally': False,
        'max_consistency_cost': max_consistency_cost,
        'apply_consistency_to_labeled': True,
        'adam_beta_2_during_rampup': 0.999,
        'ema_decay_during_rampup': 0.999,
        'normalize_input': False,
        'rampdown_length': 25000,
        'rampup_length': 40000,
        'training_length': 80000,
        'bg_noise_input': data['bg_noise_img'],
        'bg_noise_level': 0,
        'cons_loss': 'softmax'
    }

    tf.reset_default_graph()
    runner_name = os.path.basename(__file__).split(".")[0]
    file_name = '{}_{}'.format(runner_name, n_labeled)
    model = mean_teacher(RunContext(file_name, data_seed), hyper_dict)

    training_batches = minibatching.training_batches(data.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        data.evaluation, minibatch_size)

    model.train(training_batches, evaluation_batches_fn)
Code Example #14
def run(data_seed=42):
    n_extra_unlabeled = 20000

    model = W266Model(RunContext(__file__, 0))

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    tweetData = TweetData(data_seed, n_extra_unlabeled)
    training_batches = minibatching.training_batches(
        tweetData.training, n_labeled_per_batch='vary')
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        tweetData.evaluation)

    model.train(training_batches, evaluation_batches_fn)

    LOG.info("Result on test set:")
    tweetDataTest = TweetData(data_seed, test_phase=True)
    evaluation_test_batches_fn = minibatching.evaluation_epoch_generator(
        tweetDataTest.evaluation)
    model.evaluate(evaluation_test_batches_fn)
Code Example #15
def run(test_phase, n_labeled, data_seed):
    minibatch_size = 100

    data = data_loader(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    print('{} is loaded with {} training samples'.format(
        datasets_name[FLAGS.dataset_index], data['num_train']))

    if n_labeled == 'all':
        n_labeled_per_batch = minibatch_size
        max_consistency_cost = minibatch_size
    else:
        # n_labeled_per_batch = 'vary'
        n_labeled_per_batch = 20
        max_consistency_cost = minibatch_size * int(
            n_labeled) / data['num_train']

    hyper_dict = {
        'input_dim': data['input_dim'],
        'label_dim': data['label_dim'],
        'flip_horizontally': True,
        'max_consistency_cost': max_consistency_cost,
        'apply_consistency_to_labeled': True,
        'adam_beta_2_during_rampup': 0.999,
        'ema_decay_during_rampup': 0.999,
        'normalize_input': False,
        'rampdown_length': 25000,
        'training_length': 150000,
        'test_only': FLAGS.test_only
    }

    tf.reset_default_graph()
    runner_name = os.path.basename(__file__).split(".")[0]
    file_name = '{}_{}'.format(runner_name, n_labeled)
    log_plot = Training_log_plot(file_name, data_seed)
    model = mean_teacher(RunContext(file_name, data_seed), hyper_dict)

    training_batches = minibatching.training_batches(data.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        data.evaluation, minibatch_size)

    if FLAGS.test_only:
        model.restore(FLAGS.ckp)
        model.evaluate(evaluation_batches_fn)
    else:
        model.train(training_batches, evaluation_batches_fn)
Code Example #16
def run(data_seed=0):
    n_labeled = 1000
    n_extra_unlabeled = 0

    model = Model(RunContext(__file__, 0))

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training,
                                                     n_labeled_per_batch=1)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        svhn.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #17
File: train_svhn.py  Project: ys2899/mean-teacher
def run(data_seed=0):
    n_labeled = 500
    n_extra_unlabeled = 0

    model = Model(RunContext(__file__, 0))
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #18
def run(data_seed=0):
    n_labeled = 500
    n_extra_unlabeled = 0

    model = Model(RunContext(__file__, 0))
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    svhn = SVHN(data_seed, n_labeled, n_extra_unlabeled)
    training_batches = minibatching.training_batches(svhn.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(svhn.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #19
def run(data_seed=0):
    n_labeled = 4000

    model = Model(RunContext(__file__, 0))
    model['flip_horizontally'] = True
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #20
File: train_cifar10.py  Project: ys2899/mean-teacher
def run(data_seed=0):
    n_labeled = 4000

    model = Model(RunContext(__file__, 0))
    model['flip_horizontally'] = True
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 0
    model['rampup_length'] = 5000
    model['training_length'] = 40000
    model['max_consistency_cost'] = 50.0

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training, n_labeled_per_batch=50)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(cifar.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #21
def run(test_phase, n_labeled, data_seed, data_type, bg_noise):

    minibatch_size = 100
    n_labeled_per_batch = minibatch_size

    data = data_loader(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    print('{} is loaded with {} training samples'.format(
        datasets_name[FLAGS.dataset_index], data['num_train']))

    hyper_dict = {
        'input_dim': data['input_dim'],
        'label_dim': data['label_dim'],
        'cnn': 'audio',
        'flip_horizontally': False,
        'max_consistency_cost': 0,
        'apply_consistency_to_labeled': False,
        'adam_beta_2_during_rampup': 0.999,
        'ema_decay_during_rampup': 0.999,
        'normalize_input': True,
        'rampdown_length': 25000,
        'rampup_length': 40000,
        'training_length': 80000
    }

    tf.reset_default_graph()
    runner_name = os.path.basename(__file__).split(".")[0]
    file_name = '{}_{}'.format(runner_name, n_labeled)
    model = mean_teacher(RunContext(file_name, data_seed), hyper_dict)

    training_batches = minibatching.training_batches(data.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        data.evaluation, minibatch_size)

    model.train(training_batches, evaluation_batches_fn)
Code Example #22
File: train_cifar10.py  Project: zqsunny/mean-teacher
def run():
    data_seed = 0
    n_labeled = 4000

    model = Model(RunContext(__file__, 0))
    model['flip_horizontally'] = True
    model['max_consistency_cost'] = 100.0 * n_labeled / 50000
    model['adam_beta_2_during_rampup'] = 0.999
    model['ema_decay_during_rampup'] = 0.999
    model['normalize_input'] = False  # Keep ZCA information
    model['rampdown_length'] = 25000
    model['training_length'] = 150000

    tensorboard_dir = model.save_tensorboard_graph()
    LOG.info("Saved tensorboard graph to %r", tensorboard_dir)

    cifar = Cifar10ZCA(data_seed, n_labeled)
    training_batches = minibatching.training_batches(cifar.training)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        cifar.evaluation)

    model.train(training_batches, evaluation_batches_fn)
Code Example #23
def run(test_phase, n_labeled, data_seed):

    minibatch_size = 100

    data = data_loader(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=test_phase)

    if n_labeled == 'all':
        n_labeled_per_batch = minibatch_size
        max_consistency_cost = minibatch_size
    else:
        n_labeled_per_batch = 'vary'
        max_consistency_cost = minibatch_size * int(n_labeled) / data['num_train']

    hyper_dict = {'input_dim': data['input_dim'],
                  'label_dim': data['label_dim'],
                  'cnn': 'tower',
                  'flip_horizontally': True,
                  'max_consistency_cost': max_consistency_cost,
                  'adam_beta_2_during_rampup': 0.999,
                  'ema_decay_during_rampup': 0.999,
                  'normalize_input': False,
                  'rampdown_length': 25000,
                  'training_length': 150000}

    tf.reset_default_graph()
    model = mean_teacher(RunContext(__file__, data_seed), hyper_dict)

    training_batches = minibatching.training_batches(data.training,
                                                     minibatch_size,
                                                     n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(data.evaluation,
                                                                    minibatch_size)

    model.train(training_batches, evaluation_batches_fn)
Code Example #24
File: har.py  Project: I2RDL2/ASTAR-HAR
def run(n_labeled, data_seed):

    data = data_loader(n_labeled=n_labeled,
                       data_seed=data_seed,
                       test_phase=True)
    # dataset_detail=args.dataset_detail

    print('{} is loaded with {} training samples'.format(
        args.dataset, data['num_train']))

    if n_labeled == 'all':
        args.n_labeled_per_batch = args.minibatch_size
        args.max_consistency_cost = args.minibatch_size
    else:
        if args.max_consistency_cost != 0:
            args.max_consistency_cost = args.minibatch_size * int(
                n_labeled) / data['num_train']

    tf.reset_default_graph()
    runner_name = args.save
    file_name = '{}_{}'.format(runner_name, n_labeled)
    log_plot = Training_log_plot(file_name, data_seed)
    model = mean_teacher(RunContext(file_name, data_seed), args, log_plot)

    training_batches = minibatching.training_batches(data.training,
                                                     args.minibatch_size,
                                                     args.n_labeled_per_batch)
    evaluation_batches_fn = minibatching.evaluation_epoch_generator(
        data.evaluation, args.minibatch_size)
    test_batches_fn = minibatching.evaluation_epoch_generator(data.evaluation,
                                                              batch_size=260)

    # import pdb; pdb.set_trace()
    if args.test_only:
        print('loading folders')
        root_path = "./results/"
        folders = os.listdir(root_path)
        assert args.ckp != '.', 'No ckp info was input'
        for i in range(len(folders)):
            folders[i] = os.path.join(root_path, folders[i])
            #print(folders[i])
        for folder in folders:
            if args.ckp in folder:
                print(folder)

                matrix = []
                for random_seed in os.listdir(folder):
                    ckp_path = os.path.join(folder, random_seed, 'transient')
                    ckp = tf.train.latest_checkpoint(ckp_path)
                    print('restore checkpoint from {}'.format(ckp))
                    model.restore(ckp)

                    confuse_matrix = model.confusion_matrix(test_batches_fn)
                    print(acc_from_confuse(confuse_matrix))
                    acc_matrix = acc_from_confuse(confuse_matrix)
                    matrix.append(acc_matrix)

                save_confuse_matrix(matrix, ckp_path)

    else:
        model.train(training_batches, evaluation_batches_fn)