Example 1
from tqdm import tqdm

# NOTE: load_arguments, load_config, load_train_data, load, on_train_batch_start,
# train_step, on_train_batch_end, validation_step, on_validation_batch_end, and
# on_epoch_end are helpers defined elsewhere in the source.
def main():
    arguments = load_arguments()
    config = load_config(arguments.config)

    train_labeled_dataloader, train_unlabeled_dataloader, validation_dataloader = load_train_data(config)
    model, ema_model, optimizer, ema_optimizer, semi_supervised, semi_supervised_loss = load(config)

    metrics = {
        'train_loss': 0,
        'train_accuracy': 0,
        'train_steps': 0,
        'validation_accuracy': 0,
        'validation_steps': 0
    }

    # resume best-accuracy bookkeeping from the config; note the source names
    # this config field best_test_accuracy
    best_validation_accuracy = config.best_test_accuracy

    # resume from the last completed epoch recorded in the config
    for epoch_step in range(config.epoch_step + 1, config.epochs + 1):
        train_labeled_dataloader_iterator = iter(train_labeled_dataloader)
        train_unlabeled_dataloader_iterator = iter(train_unlabeled_dataloader) if train_unlabeled_dataloader else None

        train_progress_bar = tqdm(range(config.iterations))
        for batch_step in train_progress_bar:
            # fetch the next labeled batch (and unlabeled batch ub, if any),
            # re-creating exhausted dataloader iterators as needed
            inputs_x, targets_x, ub, train_labeled_dataloader_iterator, train_unlabeled_dataloader_iterator = on_train_batch_start(
                train_labeled_dataloader, train_unlabeled_dataloader, train_labeled_dataloader_iterator,
                train_unlabeled_dataloader_iterator, config)
            train_step(epoch_step, batch_step, inputs_x, targets_x, ub,
                       semi_supervised, semi_supervised_loss, model,
                       ema_model, optimizer, ema_optimizer, metrics, config)
            on_train_batch_end(epoch_step, inputs_x, targets_x, ema_model, metrics, train_progress_bar)

        validation_progress_bar = tqdm(enumerate(validation_dataloader), total=len(validation_dataloader))
        for batch_step, batch in validation_progress_bar:
            validation_step(ema_model, batch, metrics, config)
            on_validation_batch_end(epoch_step, metrics, validation_progress_bar)

        best_validation_accuracy = on_epoch_end(epoch_step, best_validation_accuracy, model, ema_model, optimizer,
                                                config, metrics)

        # reset the running metrics before the next epoch
        metrics = {
            'train_loss': 0,
            'train_accuracy': 0,
            'train_steps': 0,
            'validation_accuracy': 0,
            'validation_steps': 0
        }


if __name__ == '__main__':
    main()
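
The loop above relies on on_train_batch_start to return the next labeled batch (and the next unlabeled batch when one is available), transparently restarting either iterator once its dataloader is exhausted, so each epoch can run for a fixed config.iterations. A minimal sketch of that pattern follows; only the helper's signature appears above, so the body here is an assumption, and the exact structure of the unlabeled batch depends on the dataset:

def on_train_batch_start(train_labeled_dataloader, train_unlabeled_dataloader,
                         train_labeled_dataloader_iterator,
                         train_unlabeled_dataloader_iterator, config):
    # Restart the labeled iterator when it runs out, so the epoch length is
    # decoupled from the (typically small) labeled dataset size.
    try:
        inputs_x, targets_x = next(train_labeled_dataloader_iterator)
    except StopIteration:
        train_labeled_dataloader_iterator = iter(train_labeled_dataloader)
        inputs_x, targets_x = next(train_labeled_dataloader_iterator)

    # Same pattern for the unlabeled stream, which may be absent entirely.
    ub = None
    if train_unlabeled_dataloader_iterator is not None:
        try:
            ub = next(train_unlabeled_dataloader_iterator)
        except StopIteration:
            train_unlabeled_dataloader_iterator = iter(train_unlabeled_dataloader)
            ub = next(train_unlabeled_dataloader_iterator)

    return (inputs_x, targets_x, ub,
            train_labeled_dataloader_iterator,
            train_unlabeled_dataloader_iterator)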
Example 2
import os

import tensorflow as tf

# NOTE: TF1-style code; sess (the global tf.Session), logger, load_arguments,
# build_unify_vocab, and Vocabulary are defined elsewhere in the source.
# eliminate the first variable scope, and restore the classifier from the path
def restore_classifier_by_path(classifier, classifier_path, scope):
    new_vars = {}
    for var in classifier.params:
        pos = var.name.find('/')
        # eliminate the first variable scope (e.g., 'target/', 'source/') and
        # strip the trailing ':0' suffix so the name matches the checkpoint key
        new_vars[var.name[pos + 1:-2]] = var
    saver = tf.train.Saver(new_vars)
    saver.restore(sess, os.path.join(classifier_path, 'model'))
    logger.info("-----%s classifier model loaded from %s successfully!-----" %
                (scope, classifier_path))
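
# For reference (hypothetical variable name, not from the original source):
# the slice above maps a graph variable named 'target/encoder/kernel:0' to
# the checkpoint key 'encoder/kernel', so a checkpoint saved without the
# outer scope can be restored into a scoped copy of the classifier.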


if __name__ == '__main__':
    args = load_arguments()
    assert args.domain_adapt, "domain_adapt arg should be True."

    # build a joint vocabulary over the target and source training sets if
    # one does not exist yet
    if not os.path.isfile(args.multi_vocab):
        build_unify_vocab([args.target_train_path, args.source_train_path],
                          args.multi_vocab)
    multi_vocab = Vocabulary(args.multi_vocab)
    logger.info('vocabulary size: %d' % multi_vocab.size)

    # set up the TensorBoard log directory (optionally suffixed per run)
    if args.suffix:
        tensorboard_dir = os.path.join(args.logDir, 'tensorboard', args.suffix)
    else:
        tensorboard_dir = os.path.join(args.logDir, 'tensorboard')
    if not os.path.exists(tensorboard_dir):
        os.makedirs(tensorboard_dir)
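
# NOTE: the remainder of the original script is omitted from this excerpt.
# A typical TF1-style continuation (a sketch under assumptions, not the
# original code) would attach a summary writer to tensorboard_dir and then
# restore the pretrained classifiers with the global session:
#
#     sess = tf.Session()
#     writer = tf.summary.FileWriter(tensorboard_dir, sess.graph)
#     restore_classifier_by_path(target_classifier, args.target_model_path,
#                                'target')  # hypothetical names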