Example #1
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'queued') == 'parallel')
    trainer = SupervisedTrainer(model,
                                cnf,
                                training_iter,
                                validation_iter,
                                classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
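
Each main() in these examples is an entry point meant to be wrapped as a command-line script. A minimal sketch of such a wrapper, assuming the click library; the option names simply mirror main()'s parameters and the defaults shown here are hypothetical:

import click

@click.command()
@click.option('--model', default='examples/model.py', help='Path to the model definition module.')
@click.option('--training_cnf', default='examples/cnf.py', help='Path to the training config module.')
@click.option('--data_dir', default='data', help='Directory holding the dataset.')
@click.option('--start_epoch', default=1, help='Epoch to (re)start training from.')
@click.option('--resume_lr', default=0.01, help='Learning rate to resume with.')
@click.option('--weights_from', default=None, help='Checkpoint to load initial weights from.')
@click.option('--clean', is_flag=True, help='Start from a clean training directory.')
@click.option('--visuals', is_flag=True, help='Enable training visualizations.')
def cli(**kwargs):
    main(**kwargs)

if __name__ == '__main__':
    cli()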
Example #2
def main(model, training_cnf, data_dir, parallel, start_epoch, task_id,
         job_name, ps_hosts, worker_hosts, weights_from, resume_lr,
         gpu_memory_fraction, is_summary, loss_type):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    ps_hosts = ps_hosts.split(',')
    worker_hosts = worker_hosts.split(',')
    cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts,
                                         'worker': worker_hosts})
    server = tf.train.Server(cluster_spec,
                             job_name=job_name,
                             task_index=task_id)

    util.init_logging('train.log', file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    if job_name == 'ps':
        server.join()
    else:
        learner = DistSupervisedLearner(
            model, cnf, resume_lr=resume_lr,
            classification=cnf['classification'],
            gpu_memory_fraction=gpu_memory_fraction,
            is_summary=is_summary, loss_type=loss_type, verbosity=1)
        data_dir_train = os.path.join(data_dir, 'train')
        data_dir_val = os.path.join(data_dir, 'val')
        learner.fit(task_id, server, cluster_spec, data_dir_train, data_dir_val,
                    weights_from=weights_from, start_epoch=start_epoch,
                    training_set_size=50000, val_set_size=10000,
                    summary_every=399, keep_moving_averages=True)
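
The ps/worker setup above follows TensorFlow 1.x's between-graph replication pattern: every process builds the same ClusterSpec, then starts a server for its own (job_name, task_index) slot. A minimal sketch with a hypothetical single-machine cluster:

import tensorflow as tf

cluster = tf.train.ClusterSpec({'ps': ['localhost:2222'],
                                'worker': ['localhost:2223']})
# This process claims the lone ps slot; a worker process would pass
# job_name='worker' and build the training graph instead.
server = tf.train.Server(cluster, job_name='ps', task_index=0)
server.join()  # parameter servers block here, serving variables to workers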
Example #3
def main():
    train, test, _ = imdb.load_data(path='imdb.pkl',
                                    n_words=10000,
                                    valid_portion=0.1)
    trainX, trainY = train
    testX, testY = test

    trainX = pad_sequences(trainX, maxlen=100, value=0.)
    testX = pad_sequences(testX, maxlen=100, value=0.)
    trainY = np.asarray(trainY)
    testY = np.asarray(testY)
    data_set = DataSet(trainX, trainY, testX, testY)
    training_cnf = {
        'classification': True,
        'validation_scores': [('validation accuracy', util.accuracy_tf)],
        'num_epochs': 50,
        'input_size': (100, ),
        'lr_policy': StepDecayPolicy(schedule={
            0: 0.01,
            30: 0.001,
        })
    }
    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)

    learner = SupervisedLearner(model,
                                training_cnf,
                                classification=training_cnf['classification'],
                                is_summary=False)
    learner.fit(data_set, weights_from=None, start_epoch=1)
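
StepDecayPolicy's schedule maps epoch boundaries to learning rates. Assuming the conventional step-decay semantics (the rate of the most recently reached boundary stays in effect), the schedule above resolves like this:

def lr_for_epoch(schedule, epoch):
    # Take the rate at the largest boundary not exceeding `epoch`.
    return schedule[max(e for e in schedule if e <= epoch)]

assert lr_for_epoch({0: 0.01, 30: 0.001}, 29) == 0.01   # first step still active
assert lr_for_epoch({0: 0.01, 30: 0.001}, 30) == 0.001  # decayed at epoch 30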
Example #4
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         resume_lr, gpu_memory_fraction, is_summary):
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    standardizer = cnf.get('standardizer', NoOpStandardizer())

    training_iter, validation_iter = create_training_iters(cnf,
                                                           data_set,
                                                           standardizer,
                                                           model_def.crop_size,
                                                           start_epoch,
                                                           parallel=parallel)
    trainer = SupervisedTrainer(model,
                                cnf,
                                training_iter,
                                validation_iter,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                is_summary=is_summary,
                                loss_type='kappa_log')
    trainer.fit(data_set,
                weights_from,
                start_epoch,
                verbose=1,
                summary_every=399)
Example #5
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         resume_lr, gpu_memory_fraction, is_summary, num_classes):
    model_def = util.load_module(model)
    model = model_def
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train_ss.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    standardizer = cnf.get('standardizer', NoOpStandardizer())

    training_iter, validation_iter = create_training_iters(cnf,
                                                           data_set,
                                                           standardizer,
                                                           model_def.crop_size,
                                                           start_epoch,
                                                           parallel=parallel)
    trainer = GenerativeLearner(model,
                                cnf,
                                training_iterator=training_iter,
                                validation_iterator=validation_iter,
                                resume_lr=resume_lr,
                                classification=cnf['classification'],
                                gpu_memory_fraction=gpu_memory_fraction,
                                is_summary=is_summary,
                                verbosity=2)
    trainer.fit(data_set,
                num_classes,
                weights_from,
                start_epoch,
                summary_every=399)
Example #6
def main(model, training_cnf, data_dir, parallel, start_epoch, weights_from,
         weights_dir, resume_lr, gpu_memory_fraction, is_summary, loss_type):
    with tf.Graph().as_default():
        model_def = util.load_module(model)
        model = model_def.model
        cnf = util.load_module(training_cnf).cnf

        util.init_logging('train.log',
                          file_log_level=logging.INFO,
                          console_log_level=logging.INFO)
        if weights_from:
            weights_from = str(weights_from)

        trainer = SupervisedLearner(model,
                                    cnf,
                                    log_file_name='train_seg.log',
                                    resume_lr=resume_lr,
                                    classification=cnf['classification'],
                                    gpu_memory_fraction=gpu_memory_fraction,
                                    num_classes=15,
                                    is_summary=is_summary,
                                    loss_type=loss_type,
                                    verbosity=1)
        trainer.fit(data_dir,
                    weights_from=weights_from,
                    weights_dir=weights_dir,
                    start_epoch=start_epoch,
                    summary_every=399,
                    keep_moving_averages=True)
Example #7
def train():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)

    width = 28
    height = 28

    train_images = mnist[0].images.reshape(-1, height, width, 1)
    train_labels = mnist[0].labels

    validation_images = mnist[1].images.reshape(-1, height, width, 1)
    validation_labels = mnist[1].labels

    data_set = DataSet(train_images, train_labels,
                       validation_images, validation_labels)

    training_cnf = {
        'classification': True,
        'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                              ('validation kappa', util.kappa_wrapper)],
        'num_epochs': 50,
        'lr_policy': StepDecayPolicy(
            schedule={
                0: 0.01,
                30: 0.001,
            }
        )
    }
    util.init_logging('train.log', file_log_level=logging.INFO,
                      console_log_level=logging.INFO)

    trainer = SupervisedTrainer(model, training_cnf,
                                classification=training_cnf['classification'],
                                is_summary=True)
    trainer.fit(data_set, weights_from=None,
                start_epoch=1, verbose=1, summary_every=10)
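
The reshape calls above convert MNIST's flattened 784-value rows into NHWC tensors of shape (N, 28, 28, 1), which is the layout the DataSet wrapper receives. A quick self-contained check:

import numpy as np

flat = np.zeros((5, 28 * 28), dtype=np.float32)  # five flattened MNIST rows
images = flat.reshape(-1, 28, 28, 1)             # -1 infers the batch size
print(images.shape)  # (5, 28, 28, 1)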
Example #8
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from, clean):
    util.check_required_program_args([model, training_cnf, data_dir])
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log', file_log_level=logging.INFO, console_log_level=logging.INFO, clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter = BatchIterator(cnf['batch_size_train'], True)
    validation_iter = BatchIterator(cnf['batch_size_test'], True)
    trainer = SupervisedTrainer(model, cnf, training_iter, validation_iter, classification=cnf['classification'])
    trainer.fit(data_set, weights_from, start_epoch, resume_lr, verbose=1,
                summary_every=cnf.get('summary_every', 10), clean=clean)
Example #9
def main(model, training_cnf, data_dir, start_epoch, resume_lr, weights_from,
         weights_exclude_scopes, trainable_scopes, clean, visuals):
    util.check_required_program_args([model, training_cnf, data_dir])
    sys.path.insert(0, '.')
    model_def = util.load_module(model)
    model = model_def.model
    cnf = util.load_module(training_cnf).cnf

    util.init_logging('train.log',
                      file_log_level=logging.INFO,
                      console_log_level=logging.INFO,
                      clean=clean)
    if weights_from:
        weights_from = str(weights_from)

    data_set = DataSet(data_dir, model_def.image_size[0])
    training_iter, validation_iter = create_training_iters(
        cnf, data_set, model_def.crop_size, start_epoch,
        cnf.get('iterator_type', 'parallel') == 'parallel')

    try:
        input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0],
                       model_def.num_channels)
    except AttributeError:
        input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0], 3)

    trainer = SupervisedTrainerQ(model,
                                 cnf,
                                 input_shape,
                                 trainable_scopes,
                                 training_iter,
                                 validation_iter,
                                 classification=cnf['classification'])
    trainer.fit(data_set,
                weights_from,
                weights_exclude_scopes,
                start_epoch,
                resume_lr,
                verbose=1,
                summary_every=cnf.get('summary_every', 10),
                clean=clean,
                visuals=visuals)
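
The try/except in this example falls back to 3 channels (RGB) when the model module does not define num_channels. An equivalent, more compact spelling, shown only as an illustration:

num_channels = getattr(model_def, 'num_channels', 3)
input_shape = (-1, model_def.crop_size[1], model_def.crop_size[0], num_channels)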
Example #10
    return end_points(is_training)


training_cnf = {
    'classification': True,
    'validation_scores': [('validation accuracy', util.accuracy_wrapper),
                          ('validation kappa', util.kappa_wrapper)],
    'num_epochs': 30,
    'lr_policy': StepDecayPolicy(schedule={
        0: 0.001,
        15: 0.0001,
    }),
    'optimizer': tf.train.AdamOptimizer()
}
util.init_logging('train.log',
                  file_log_level=logging.INFO,
                  console_log_level=logging.INFO)

trainer = SupervisedTrainer(model,
                            training_cnf,
                            classification=training_cnf['classification'])
trainer.fit(data_set,
            weights_from=None,
            start_epoch=1,
            verbose=1,
            summary_every=10)