Code example #1
results = extractor(tf.constant(content_image))

print('Styles:')
print_stats_of_layers(results['style'].items())

print("Contents:")
print_stats_of_layers(results['content'].items())

style_targets = extractor(style_image)['style']
content_targets = extractor(content_image)['content']
image = tf.Variable(content_image)
opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)

targs_style = style_targets, style_weight, num_style_layers
targs_content = content_targets, content_weight, num_content_layers

start = time.time()
step = 0
for n in range(epochs):
    for m in range(steps_per_epoch):
        step += 1
        train_step(image, extractor, opt, targs_style, targs_content)
        print(".", end='')
    if save_progress:
        tensor_to_image(image).save(output_file[:-4] + str(n) + '.png')
    print("Train step: {}".format(step))

end = time.time()
print("Total time: {:.1f}".format(end - start))
tensor_to_image(image).save(output_file)
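The loop above only calls train_step; its definition is not part of this snippet. Below is a minimal sketch of such a step, assuming the extractor returns dicts of 'style' and 'content' features (as the targets above suggest) and that pixel values should stay in [0, 1]; the loss weighting follows the usual style-transfer recipe rather than this project's exact code.

import tensorflow as tf

@tf.function
def train_step(image, extractor, opt, targs_style, targs_content):
    # Unpack the (targets, weight, num_layers) tuples built in the loop above.
    style_targets, style_weight, num_style_layers = targs_style
    content_targets, content_weight, num_content_layers = targs_content
    with tf.GradientTape() as tape:
        outputs = extractor(image)
        # Mean-squared error against the style and content targets, summed over layers.
        style_loss = tf.add_n([
            tf.reduce_mean((outputs['style'][name] - style_targets[name]) ** 2)
            for name in outputs['style']])
        content_loss = tf.add_n([
            tf.reduce_mean((outputs['content'][name] - content_targets[name]) ** 2)
            for name in outputs['content']])
        loss = (style_weight / num_style_layers * style_loss
                + content_weight / num_content_layers * content_loss)
    grad = tape.gradient(loss, image)
    opt.apply_gradients([(grad, image)])
    # Keep the optimized image a valid picture.
    image.assign(tf.clip_by_value(image, 0.0, 1.0))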
Code example #2
File: run.py  Project: pengyanhui/RSimEA
def train(model, triples, entities, un_ents, un_rels, test_pairs):
    logging.info("---------------Start Training---------------")

    ht_1, ht_2 = get_r_hts(triples, un_rels)
    rel_seeds = relation_seeds({}, ht_1, ht_2, un_rels)

    current_lr = config.learning_rate
    optimizer = get_optim(model, current_lr)
    if config.init_checkpoint:
        logging.info("Loading checkpoint...")
        checkpoint = torch.load(os.path.join(config.save_path, "checkpoint"))
        init_step = checkpoint["step"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        if config.use_old_optimizer:
            current_lr = checkpoint["current_lr"]
            optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        init_step = 1

    training_logs = []
    train_iterator = train_data_iterator(entities,
                                         new_triples(triples, rel_seeds, {}))
    # Training Loop
    for step in range(init_step, config.max_step):
        log = train_step(model, optimizer, next(train_iterator))
        training_logs.append(log)

        # log
        if step % config.log_step == 0:
            metrics = {}
            for metric in training_logs[0].keys():
                metrics[metric] = sum([log[metric] for log in training_logs
                                       ]) / len(training_logs)
            log_metrics("Training average", step, metrics)
            training_logs.clear()

        # warm up
        if step % config.warm_up_step == 0:
            current_lr *= 0.1
            logging.info("Change learning_rate to %f at step %d" %
                         (current_lr, step))
            optimizer = get_optim(model, current_lr)

        if step % config.update_step == 0:
            logging.info("Align entities and relations, swap parameters")
            seeds, align_e_1, align_e_2 = entity_seeds(model, un_ents)
            rel_seeds = relation_seeds(seeds, ht_1, ht_2, un_rels)
            new_entities = (entities[0] + align_e_2, entities[1] + align_e_1)
            train_iterator = train_data_iterator(
                new_entities, new_triples(triples, rel_seeds, seeds))
            save_variable_list = {
                "step": step,
                "current_lr": current_lr,
            }
            save_model(model, optimizer, save_variable_list)

    logging.info("---------------Test on test dataset---------------")
    metrics = test_step(model, test_pairs, un_ents)
    log_metrics("Test", config.max_step, metrics)

    logging.info("---------------Training End---------------")
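Here, too, train_step is imported from elsewhere in the project. A minimal sketch compatible with this loop is given below; it assumes the batch yielded by train_data_iterator can be fed directly to the model's forward pass and that the forward pass returns a scalar loss, which is an assumption about the project's interfaces.

def train_step(model, optimizer, batch):
    # One optimization step: forward pass, backward pass, parameter update.
    model.train()
    optimizer.zero_grad()
    loss = model(batch)
    loss.backward()
    optimizer.step()
    # The caller averages these per-step logs every config.log_step steps.
    return {"loss": loss.item()}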
Code example #3
                            vocabulary_val,
                            sequence_length_val,
                            set_name='val')
val_loader = torch.utils.data.DataLoader(dataset=data_val,
                                         batch_size=8,
                                         shuffle=True,
                                         num_workers=0,
                                         pin_memory=True)

global best_acc1
best_acc1 = 0

# Train and val
for epoch in tqdm(range(0, epochs)):
    print('Starting epoch:', epoch)
    acc_t1, kappa_t = train_step(train_loader, model, criterion, optimizer)

    # evaluate on validation set
    acc_v1, kappa_v = validate_step(val_loader, model, criterion)
    writer.add_scalars('data/acc', {'train': acc_t1, 'val': acc_v1}, epoch)
    writer.add_scalars('data/kappa', {'train': kappa_t, 'val': kappa_v}, epoch)
    is_best = acc_v1 > best_acc1
    if is_best:
        epo = epoch
    best_acc1 = max(acc_v1, best_acc1)

    save_checkpoint_step(
        {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
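The snippet is cut off before the end of the save_checkpoint_step call, and the train_step and validate_step it relies on are defined elsewhere. Below is a minimal sketch of an epoch-level train_step that returns the (accuracy, kappa) pair this loop expects; the use of sklearn's cohen_kappa_score and the percentage-style accuracy are assumptions, not the project's actual code.

from sklearn.metrics import cohen_kappa_score

def train_step(loader, model, criterion, optimizer):
    model.train()
    correct, total = 0, 0
    all_preds, all_targets = [], []
    for inputs, targets in loader:
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Track predictions to compute epoch-level accuracy and kappa.
        preds = outputs.argmax(dim=1)
        correct += (preds == targets).sum().item()
        total += targets.size(0)
        all_preds.extend(preds.tolist())
        all_targets.extend(targets.tolist())
    acc = 100.0 * correct / total
    kappa = cohen_kappa_score(all_targets, all_preds)
    return acc, kappa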
Code example #4
def train(save_dir, num_epochs=300,
          learning_rate=0.0001, save_every_n_epochs=25):
  """Train pipeline for next sentence embedding prediction on ROCStories."""
  #### LOAD DATA ####
  datasets, embedding_matrices = prepare_datasets()

  #### CREATE MODEL AND OPTIMIZER ####
  num_input_sentences = tf.compat.v1.data.get_output_shapes(
      datasets['train'])[0][1]
  model = models.build_model(
      num_input_sentences=num_input_sentences,
      embedding_matrix=embedding_matrices['train'])
  metrics = model.create_metrics()
  optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)

  num_train_steps = 0

  #### DO TRAINING ####
  summary_writer = tf.summary.create_file_writer(
      os.path.join(save_dir, 'summaries'))
  with summary_writer.as_default():
    logging.info('Starting training.')
    for epoch in range(1, num_epochs+1):
      for x, labels in datasets['train']:
        utils.train_step(model, optimizer, x, labels, metrics)
        num_train_steps += 1

      start_time = time.time()
      utils.do_evaluation(model, metrics, datasets, embedding_matrices)
      logging.info('Eval took %f seconds.', (time.time() - start_time))

      to_log = ['%s=%f, ' % (m.name, m.result()) for m in metrics.values()]
      logging.info('Epoch %d, %s ', epoch, ''.join(to_log))

      # Add each metric to the TensorBoard and then reset it for the next epoch.
      for metric in metrics.values():
        tf.summary.scalar(
            metric.name, metric.result(), step=optimizer.iterations)
        metric.reset_states()

      # lr = cur_learning_rate(optimizer)
      # tf.summary.scalar('learning_rate', lr, step=optimizer.iterations)

      if epoch % save_every_n_epochs == 0:
        prefix = os.path.join(
            save_dir, 'ep%04d_step%05d.ckpt' % (epoch, num_train_steps))
        logging.info('Saving checkpoint: %s', prefix)
        checkpoint.save(file_prefix=prefix)

  #### SAVE HYPERPARAMETERS AND FINAL EVAL RESULTS TO FILE ####
  to_save = {}
  for metric in metrics.values():
    metric.reset_states()
  utils.do_evaluation(model, metrics, datasets, embedding_matrices)
  for metric in metrics.values():
    to_save['metric_' + metric.name] = metric.result().numpy()
  results_file_path = os.path.join(save_dir, 'final_eval.tsv')
  with gfile.GFile(results_file_path, 'w') as f:
    for name, value in to_save.items():
      f.write('%s\t%s\n' % (name, str(value)))
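utils.train_step is again defined outside this snippet. A minimal sketch under stated assumptions follows: it treats the task as regressing onto a target sentence embedding with a mean-squared error (one reasonable reading of "next sentence embedding prediction") and assumes metrics contains a 'train_loss' mean metric; the project's actual loss and metric names may differ.

import tensorflow as tf

@tf.function
def train_step(model, optimizer, x, labels, metrics):
    with tf.GradientTape() as tape:
        predictions = model(x, training=True)
        # Assumed objective: regress onto the target sentence embedding.
        loss = tf.reduce_mean(tf.keras.losses.mean_squared_error(labels, predictions))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # Keep running statistics; the epoch loop logs and resets them.
    metrics['train_loss'].update_state(loss)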
Code example #5
def train(model, triples, ent_num):
    logging.info("Start Training...")
    logging.info("batch_size = %d" % config.batch_size)
    logging.info("dim = %d" % config.ent_dim)
    logging.info("gamma = %f" % config.gamma)

    current_lr = config.learning_rate
    train_triples, valid_triples, test_triples = triples
    all_true_triples = train_triples + valid_triples + test_triples
    rtp = rel_type(train_triples)

    optimizer = get_optim("Adam", model, current_lr)
    train_iterator = train_data_iterator(train_triples, ent_num)

    if config.init_checkpoint:
        logging.info("Loading checkpoint...")
        checkpoint = torch.load(os.path.join(config.save_path, "checkpoint"))
        init_step = checkpoint["step"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        if config.use_old_optimizer:
            current_lr = checkpoint["current_lr"]
            optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        init_step = 1

    max_hit1 = 0.0
    max_mrr = 0.0
    training_logs = []
    # Training Loop
    for step in range(init_step, config.max_step):
        log = train_step(model, optimizer, next(train_iterator))
        training_logs.append(log)

        # log
        if step % config.log_step == 0:
            metrics = {}
            for metric in training_logs[0].keys():
                metrics[metric] = sum([log[metric] for log in training_logs
                                       ]) / len(training_logs)
            log_metrics("Training average", step, metrics)
            training_logs = []

        # valid
        if step % config.valid_step == 0:
            logging.info(
                "---------------Evaluating on Valid Dataset---------------")
            metrics = test_step(model, valid_triples, all_true_triples,
                                ent_num, rtp)
            metrics, metrics1, metrics2, metrics3, metrics4, metrics5, metrics6, metrics7, metrics8 = metrics
            logging.info("----------------Overall Results----------------")
            log_metrics("Valid", step, metrics)
            logging.info("-----------Prediction Head... 1-1 -------------")
            log_metrics("Valid", step, metrics1)
            logging.info("-----------Prediction Head... 1-M -------------")
            log_metrics("Valid", step, metrics2)
            logging.info("-----------Prediction Head... M-1 -------------")
            log_metrics("Valid", step, metrics3)
            logging.info("-----------Prediction Head... M-M -------------")
            log_metrics("Valid", step, metrics4)
            logging.info("-----------Prediction Tail... 1-1 -------------")
            log_metrics("Valid", step, metrics5)
            logging.info("-----------Prediction Tail... 1-M -------------")
            log_metrics("Valid", step, metrics6)
            logging.info("-----------Prediction Tail... M-1 -------------")
            log_metrics("Valid", step, metrics7)
            logging.info("-----------Prediction Tail... M-M -------------")
            log_metrics("Valid", step, metrics8)
            if metrics["HITS@1"] >= max_hit1 or metrics["MRR"] >= max_mrr:
                if metrics["HITS@1"] > max_hit1:
                    max_hit1 = metrics["HITS@1"]
                if metrics["MRR"] > max_mrr:
                    max_mrr = metrics["MRR"]
                save_variable_list = {
                    "step": step,
                    "current_lr": current_lr,
                }
                save_model(model, optimizer, save_variable_list)
            elif current_lr > 0.0000011:
                current_lr *= 0.1
                logging.info("Change learning_rate to %f at step %d" %
                             (current_lr, step))
                optimizer = get_optim("Adam", model, current_lr)
            else:
                logging.info(
                    "-------------------Training End-------------------")
                break
    # best state
    checkpoint = torch.load(os.path.join(config.save_path, "checkpoint"))
    model.load_state_dict(checkpoint["model_state_dict"])
    step = checkpoint["step"]
    logging.info(
        "-----------------Evaluating on Test Dataset-------------------")
    metrics = test_step(model, test_triples, all_true_triples, ent_num, rtp)
    metrics, metrics1, metrics2, metrics3, metrics4, metrics5, metrics6, metrics7, metrics8 = metrics
    logging.info("----------------Overall Results----------------")
    log_metrics("Test", step, metrics)
    logging.info("-----------Prediction Head... 1-1 -------------")
    log_metrics("Test", step, metrics1)
    logging.info("-----------Prediction Head... 1-M -------------")
    log_metrics("Test", step, metrics2)
    logging.info("-----------Prediction Head... M-1 -------------")
    log_metrics("Test", step, metrics3)
    logging.info("-----------Prediction Head... M-M -------------")
    log_metrics("Test", step, metrics4)
    logging.info("-----------Prediction Tail... 1-1 -------------")
    log_metrics("Test", step, metrics5)
    logging.info("-----------Prediction Tail... 1-M -------------")
    log_metrics("Test", step, metrics6)
    logging.info("-----------Prediction Tail... M-1 -------------")
    log_metrics("Test", step, metrics7)
    logging.info("-----------Prediction Tail... M-M -------------")
    log_metrics("Test", step, metrics8)
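The helper log_metrics appears in several of these examples without being defined. A minimal sketch is given below; it is an assumption about the project's helper, which simply records each metric together with its phase and step.

import logging

def log_metrics(mode, step, metrics):
    # Write every metric in the dict to the log, tagged with the phase and step.
    for metric, value in metrics.items():
        logging.info("%s %s at step %d: %f" % (mode, metric, step, value))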
Code example #6
File: run.py  Project: pengyanhui/LineaRE
def train(model, triples, ent_num):
    logging.info("Start Training...")
    logging.info("batch_size = %d" % config.batch_size)
    logging.info("dim = %d" % config.ent_dim)
    logging.info("gamma = %f" % config.gamma)

    current_lr = config.learning_rate
    train_triples, valid_triples, test_triples, symmetry_test, inversion_test, composition_test, others_test = triples
    all_true_triples = train_triples + valid_triples + test_triples
    r_tp = rel_type(train_triples)

    optimizer = get_optim("Adam", model, current_lr)

    if config.init_checkpoint:
        logging.info("Loading checkpoint...")
        checkpoint = torch.load(os.path.join(config.save_path, "checkpoint"),
                                map_location=torch.device("cuda:0"))
        init_step = checkpoint["step"] + 1
        model.load_state_dict(checkpoint["model_state_dict"])
        if config.use_old_optimizer:
            current_lr = checkpoint["current_lr"]
            optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    else:
        init_step = 1

    true_all_heads, true_all_tails = get_true_ents(all_true_triples)
    train_iterator = train_data_iterator(train_triples, ent_num)
    test_data_list = test_data_sets(valid_triples, true_all_heads,
                                    true_all_tails, ent_num, r_tp)

    max_mrr = 0.0
    training_logs = []
    modes = ["Prediction Head", "Prediction Tail"]
    rtps = ["1-1", "1-M", "M-1", "M-M"]
    # Training Loop
    for step in range(init_step, config.max_step + 1):
        log = train_step(model, optimizer, next(train_iterator))
        training_logs.append(log)

        # log
        if step % config.log_step == 0:
            metrics = {}
            for metric in training_logs[0].keys():
                metrics[metric] = sum([log[metric] for log in training_logs
                                       ]) / len(training_logs)
            log_metrics("Training", step, metrics)
            training_logs.clear()

        # valid
        if step % config.valid_step == 0:
            logging.info("-" * 10 + "Evaluating on Valid Dataset" + "-" * 10)
            metrics = test_step(model, test_data_list, True)
            log_metrics("Valid", step, metrics[0])
            cnt_mode_rtp = 1
            for mode in modes:
                for rtp in rtps:
                    logging.info("-" * 10 + mode + "..." + rtp + "-" * 10)
                    log_metrics("Valid", step, metrics[cnt_mode_rtp])
                    cnt_mode_rtp += 1
            if metrics[0]["MRR"] >= max_mrr:
                max_mrr = metrics[0]["MRR"]
                save_variable_list = {
                    "step": step,
                    "current_lr": current_lr,
                }
                save_model(model, optimizer, save_variable_list)
            if step / config.max_step in [0.2, 0.5, 0.8]:
                current_lr *= 0.1
                logging.info("Change learning_rate to %f at step %d" %
                             (current_lr, step))
                optimizer = get_optim("Adam", model, current_lr)

    # load best state
    checkpoint = torch.load(os.path.join(config.save_path, "checkpoint"))
    model.load_state_dict(checkpoint["model_state_dict"])
    step = checkpoint["step"]

    # relation patterns
    test_datasets = [
        symmetry_test, inversion_test, composition_test, others_test
    ]
    test_datasets_str = ["Symmetry", "Inversion", "Composition", "Other"]
    for i in range(len(test_datasets)):
        dataset = test_datasets[i]
        dataset_str = test_datasets_str[i]
        if len(dataset) == 0:
            continue
        test_data_list = test_data_sets(dataset, true_all_heads,
                                        true_all_tails, ent_num, r_tp)
        logging.info("-" * 10 + "Evaluating on " + dataset_str + " Dataset" +
                     "-" * 10)
        metrics = test_step(model, test_data_list)
        log_metrics("Valid", step, metrics)

    # finally test
    test_data_list = test_data_sets(test_triples, true_all_heads,
                                    true_all_tails, ent_num, r_tp)
    logging.info("----------Evaluating on Test Dataset----------")
    metrics = test_step(model, test_data_list, True)
    log_metrics("Test", step, metrics[0])
    cnt_mode_rtp = 1
    for mode in modes:
        for rtp in rtps:
            logging.info("-" * 10 + mode + "..." + rtp + "-" * 10)
            log_metrics("Test", step, metrics[cnt_mode_rtp])
            cnt_mode_rtp += 1
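save_model, used here and in the earlier examples, is also defined elsewhere. A minimal sketch consistent with the checkpoint-loading code above follows; it assumes config.save_path is a writable directory and mirrors the keys ("step", "current_lr", "model_state_dict", "optimizer_state_dict") that the loading code reads back.

import os
import torch

def save_model(model, optimizer, save_variable_list):
    # save_variable_list carries extras such as {"step": ..., "current_lr": ...}.
    state = dict(save_variable_list)
    state["model_state_dict"] = model.state_dict()
    state["optimizer_state_dict"] = optimizer.state_dict()
    torch.save(state, os.path.join(config.save_path, "checkpoint"))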