def objective(trial):
        """Optuna objective: tune the double-correlation (correct & smooth
        style) post-processing hyperparameters over saved model outputs and
        return the mean validation score across runs."""
        DAD, AD, DA = gen_normalized_adjs(dataset)

        alpha1 = trial.suggest_float("alpha1", 0.0, 1.0)
        alpha2 = trial.suggest_float("alpha2", 0.0, 1.0)
        # Choose which normalized adjacency to use in each propagation step;
        # resolve the sampled name via a dict instead of eval().
        adjs = {'DAD': DAD, 'DA': DA, 'AD': AD}
        A1 = trial.suggest_categorical('A1', ['DAD', 'DA', 'AD'])
        A2 = trial.suggest_categorical('A2', ['DAD', 'DA', 'AD'])

        if args.cs_fixed:
            scale = trial.suggest_float("scale", 0.1, 10.0, log=True)

        logger = SimpleLogger('evaluate params', [], 2)
        # Re-score each run's saved predictions under the sampled settings.
        for run, model_out in enumerate(model_outs):
            split_idx = splits_lst[run]
            out = torch.load(model_out, map_location='cpu')
            if args.cs_fixed:
                _, out_cs = double_correlation_fixed(dataset.label, out,
                                                     split_idx,
                                                     adjs[A1], alpha1, 50,
                                                     adjs[A2], alpha2, 50,
                                                     scale, args.hops)
            else:
                _, out_cs = double_correlation_autoscale(
                    dataset.label, out, split_idx, adjs[A1], alpha1, 50,
                    adjs[A2], alpha2, 50, args.hops)
            result = evaluate(None, dataset, split_idx, eval_func, out_cs)
            logger.add_result(run, (), (result[1], result[2]))
        res = logger.display()

        trial.set_user_attr('valid',
                            f'{res[:, 0].mean():.3f} ± {res[:, 0].std():.3f}')
        trial.set_user_attr('test',
                            f'{res[:, 1].mean():.3f} ± {res[:, 1].std():.3f}')

        return res[:, 0].mean()
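
A minimal sketch of how this objective would typically be handed to Optuna (the study setup below is an assumption, not part of the original script; the direction is 'maximize' because the objective returns mean validation accuracy):

import optuna

study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=100)
print('best params:', study.best_params)
print('best validation score:', study.best_value)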
Example #2
def activate():
  # `th` is the module-level trainer-hub/config object; `th.model` must be a
  # factory that builds a Predictor from that configuration.
  assert callable(th.model)
  model = th.model(th)
  assert isinstance(model, Predictor)

  # Load data
  train_set, val_set, test_set = load_data(th.data_dir, th.memory_depth)

  # Train or evaluate
  if th.train:
    model.train(train_set, validation_set=val_set, trainer_hub=th)
  else:
    model.launch_model(overwrite=False)
    evaluate(model, train_set)
    evaluate(model, val_set)
    evaluate(model, test_set, plot=True)

  # End
  console.end()
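
For context, a sketch of how a tframe-style activate() is usually driven; MyPredictor and the specific th field values below are illustrative assumptions, not the repo's API:

# Hypothetical driver script: configure the trainer hub, then activate.
def main():
  th.model = lambda hub: MyPredictor(hub)  # factory consumed by activate()
  th.data_dir = './data'                   # assumed dataset location
  th.memory_depth = 40                     # assumed; forwarded to load_data
  th.train = True                          # False -> evaluate a saved model
  activate()

if __name__ == '__main__':
  main()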
Example #3
model_dir = f'models/{model_path}'
print(model_dir)
os.makedirs(model_dir, exist_ok=True)
DAD, AD, DA = gen_normalized_adjs(dataset)

if args.method == 'lp':
    # Label propagation has no trained parameters, so sweep alpha directly.
    for alpha in (.01, .1, .25, .5, .75, .9, .99):
        logger = Logger(args.runs, args)
        for run in range(args.runs):
            split_idx = split_idx_lst[run]
            train_idx = split_idx['train']
            model.alpha = alpha
            out = model(dataset, train_idx)
            result = evaluate(model, dataset, split_idx, eval_func, result=out)
            logger.add_result(run, result[:-1])
            print(f'alpha: {alpha} | Train: {100*result[0]:.2f} ' +
                  f'| Val: {100*result[1]:.2f} | Test: {100*result[2]:.2f}')

        best_val, best_test = logger.print_statistics()
        filename = f'results/{args.dataset}.csv'
        print(f"Saving results to {filename}")
        with open(filename, 'a+') as write_obj:
            sub_dataset = f'{args.sub_dataset},' if args.sub_dataset else ''
            write_obj.write(
                f"{args.method},{sub_dataset}"
                f"{best_val.mean():.3f} ± {best_val.std():.3f},"
                f"{best_test.mean():.3f} ± {best_test.std():.3f}\n")
    sys.exit()
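
The alpha swept above is the usual label propagation mixing weight: predictions are iterated as Y <- alpha * A_hat @ Y + (1 - alpha) * Y0, where Y0 holds the one-hot training labels and A_hat is a normalized adjacency. A minimal sketch of such a model (hypothetical, not this repo's implementation):

import torch
import torch.nn.functional as F

class LabelPropagation:
    """Iterate Y <- alpha * A_hat @ Y + (1 - alpha) * Y0."""

    def __init__(self, num_iters=50, alpha=0.9):
        self.num_iters = num_iters
        self.alpha = alpha  # swept externally, as in the loop above

    def __call__(self, adj, labels, train_idx, num_classes):
        # Seed only the training nodes with their one-hot labels.
        y0 = torch.zeros(labels.shape[0], num_classes)
        y0[train_idx] = F.one_hot(labels[train_idx], num_classes).float()
        y = y0.clone()
        for _ in range(self.num_iters):
            y = self.alpha * (adj @ y) + (1 - self.alpha) * y0
        return y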
Example #4
                      train_labels_idx,
                      epochs=2,
                      batch_size=500,
                      verbose=1)
            model.save(args.model_path + args.model_name +
                       '.h5')  # it seems the name must end in '.h5', oddly enough!

        elif out_args.mode == 'test':
            print('——————test——————')
            ###############################################
            model = biLstm_crf_model(args)
            model.load_weights(args.model_path + args.model_name + '.h5')
            predictions = model.predict(test_sentences_idx)
            pred_labels = get_pred_labels(predictions, test_sentences_len,
                                          args.idx2tag)
            f1 = evaluate(test_labels, pred_labels)
            print(f1)
            print(test_sentences[10:15])
            print('test:', test_labels[10:15])
            print('pred:', pred_labels[10:15])

    elif out_args.mode == 'demo':
        print('————————————demo————————————')
        model = biLstm_crf_model(args)
        model.load_weights(args.model_path + args.model_name + '.h5')
        while True:
            print('please input a sentence:')
            one_sentence_str = input()
            if one_sentence_str != '':
                one_sentence_list = [list(one_sentence_str)]  # batch of one char sequence
                one_sentence_list, seq_len = pad_sequences(