def run_test(sess, test_file):
    """Run the trained model on a CSV test file and print the results.

    Args:
        sess: active TensorFlow session holding the trained graph.
        test_file: path to a CSV file whose first line is a header row.

    The graph is expected to expose its input placeholders in the
    'input' collection and its inference ops in 'test_ops'.
    """
    test_data = []
    test_options = []

    # Input placeholders stored in the graph's 'input' collection.
    X = tf.get_collection('input')[0]
    V = tf.get_collection('input')[1]

    with open(test_file, "r") as f:
        f.readline()  # skip the header row
        reader = csv.reader(f, delimiter=",")
        for row in reader:
            # NOTE(review): assumes column 1 is a position binned in 15-unit
            # steps with a +2 offset — confirm against the data source.
            x_location = int(row[1]) // 15 + 2
            test_data.append([int(row[0]), float(row[5]), float(row[6]), float(row[7])])  # Trial, V, Grad, Ref_force
            test_options.append([int(row[0]), x_location, float(row[3]), float(row[4]), float(row[5]), float(row[7])]) # Trial, x_location, z_location, velocity, voltage, Ref_force


    # Data Transformations: indices of sliding windows of length seq_length.
    test_seq = np.arange(FLAGS.seq_length, np.shape(test_data)[0])
    test_idxs = np.array([test_seq - n for n in reversed(range(FLAGS.seq_length))]).T

    # Test Inputs
    test_input = np.array(test_data)[:, (1, 2)]  # V, gradV
    test_input = np.reshape(test_input[test_idxs], [-1, FLAGS.seq_length, FLAGS.input_dim])
    test_V = np.reshape(np.array(test_data)[:, 1][test_seq], [-1, 1])

    # Test Run
    estimate_loc, estimate_force = sess.run(tf.get_collection('test_ops'), feed_dict={X: test_input, V: test_V})

    estimate_force = np.reshape(estimate_force, [-1])
    # NOTE(review): the original also computed an unused
    # `estimate_loc_prob = np.reshape(estimate_loc, (-1, num_class)).tolist()`;
    # removed here — confirm print_result really wants the raw `estimate_loc`.

    test_options = np.array(test_options)
    test_options = np.reshape(test_options[test_seq], [-1, 6])

    test_force_truth = np.array(test_data)[test_seq, 3]
    test_loc_truth = np.reshape(test_options[:, 1], [-1, 1])

    print_result(test_options, estimate_force, test_force_truth, estimate_loc, test_loc_truth, test_seq)
示例#2
0
def main():
    """Smoke-test the Glance image service end to end.

    Creates a tiny throwaway image, verifies it activates, deletes it,
    and confirms the deletion took effect.  Prints 'success' or 'failed'
    via utils.print_result; never raises to the caller.
    """
    try:
        # Credentials and endpoints come from the monitor's config helpers.
        username, password, tenant_name, keystone_url = \
                                                    utils.get_token_config()
        glance_url = utils.get_glance_url()

        if DEBUG:
            print username, password, tenant_name, keystone_url, glance_url

        # Authenticate against Keystone to obtain a scoped token.
        user = keystone.Keystone(username, password,
                                 tenant_name, keystone_url)
        token = user.get_token()

        # glance_url = 'http://localhost:9292'
        glance = glanceclient.Client(endpoint=glance_url, token=token)

        # 1 KiB of dummy payload is enough to exercise the upload path.
        image = glance.images.create(name='__nvs_monitor__',
                                     data='a' * 1024,
                                     disk_format='qcow2',
                                     container_format='ovf',
                                     is_public=False)

        if image.status != 'active':
            print 'create image error. %s' % image

        image.delete()

        # After deletion the image must be gone: HTTPNotFound is the
        # expected outcome here, so it is swallowed deliberately.
        try:
            image.get()
        except glanceexc.HTTPNotFound:
            pass

    except Exception:
        # Any other failure anywhere above marks the whole check failed.
        if DEBUG:
            utils.print_traceback()
        result = 'failed'
    else:
        result = 'success'

    utils.print_result(result)
示例#3
0
    args.sample = 4
    args.lam = 1.0
    args.tem = 0.2
    args.alpha = 0.5
    args.lr = 0.2
    args.bn = True
    args.input_dropout = 0.6
    args.hidden_dropout = 0.8
    return args


def run(dataset_name):
    """Run the node-classification task once per configured seed."""
    args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "grand")
示例#4
0
def proteins_config(args):
    """PROTEINS uses the registry defaults unchanged."""
    return args


@register_func("collab")
def collab_config(args):
    """COLLAB override: enable the ``degree_feature`` flag."""
    args.degree_feature = True
    return args


def run(dataset_name):
    """Run the unsupervised graph-classification task for every seed."""
    args = build_default_args_for_unsupervisde_graph_classification(
        dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["mutag", "imdb-b", "imdb-m", "proteins", "collab"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "infograph")
示例#5
0
@register_func("proteins")
def proteins_config(args):
    """PROTEINS uses the registry defaults unchanged."""
    return args


@register_func("collab")
def collab_config(args):
    """COLLAB override: enable the ``degree_feature`` flag."""
    args.degree_feature = True
    return args


def run(dataset_name):
    """Run the graph-classification task once per configured seed."""
    args = build_default_args_for_graph_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["mutag", "imdb-b", "imdb-m", "proteins", "collab"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "patchy_san")
示例#6
0
def _load_json_first_line(path):
    """Parse and return the JSON object on the first line of *path*.

    Returns an empty dict when the file is empty (mirrors the original
    default-then-overwrite behaviour).
    """
    with open(path) as f:
        for line in f:
            return json.loads(line)
    return dict()


def _unpack_batch(raw_data):
    """Split a generator batch into model inputs and the three gold labels."""
    data = dict()
    data['input_ids'] = raw_data['input_ids']
    data['token_type_ids'] = raw_data['token_type_ids']
    data['attention_mask'] = raw_data['attention_mask']
    data['ocnli_ids'] = raw_data['ocnli_ids']
    data['ocemotion_ids'] = raw_data['ocemotion_ids']
    data['tnews_ids'] = raw_data['tnews_ids']
    return (data, raw_data['tnews_gold'], raw_data['ocnli_gold'],
            raw_data['ocemotion_gold'])


def _running_kpi(correct, pred_list):
    """Running per-task accuracy used for DTP weighting (0.1 before any preds)."""
    return 0.1 if len(pred_list) == 0 else correct / len(pred_list)


def train(epochs=20,
          batchSize=64,
          lr=0.0001,
          device='cuda:0',
          accumulate=True,
          a_step=16,
          load_saved=False,
          file_path='./saved_best.pt',
          use_dtp=False,
          pretrained_model='./bert_pretrain_model',
          tokenizer_model='bert-base-chinese',
          weighted_loss=False):
    """Train the multi-task net on OCNLI / OCEMOTION / TNEWS.

    Saves the whole model to *file_path* whenever the dev macro-average
    F1 improves.  With *accumulate* the optimizer steps once every
    *a_step* batches; *use_dtp* weights the per-task losses by each
    task's running accuracy (dynamic task prioritisation).
    """
    tokenizer = load_tokenizer(tokenizer_model)
    my_net = torch.load(file_path) if load_saved else Net(
        load_pretrained_model(pretrained_model))
    my_net.to(device, non_blocking=True)

    # Each of these files stores one JSON object on its first line.
    label_dict = _load_json_first_line('./tianchi_datasets/label.json')
    label_weights_dict = _load_json_first_line(
        './tianchi_datasets/label_weights.json')
    ocnli_train = _load_json_first_line('./tianchi_datasets/OCNLI/train.json')
    ocnli_dev = _load_json_first_line('./tianchi_datasets/OCNLI/dev.json')
    ocemotion_train = _load_json_first_line(
        './tianchi_datasets/OCEMOTION/train.json')
    ocemotion_dev = _load_json_first_line(
        './tianchi_datasets/OCEMOTION/dev.json')
    tnews_train = _load_json_first_line('./tianchi_datasets/TNEWS/train.json')
    tnews_dev = _load_json_first_line('./tianchi_datasets/TNEWS/dev.json')

    train_data_generator = Data_generator(ocnli_train, ocemotion_train,
                                          tnews_train, label_dict, device,
                                          tokenizer)
    dev_data_generator = Data_generator(ocnli_dev, ocemotion_dev, tnews_dev,
                                        label_dict, device, tokenizer)
    tnews_weights = torch.tensor(label_weights_dict['TNEWS']).to(
        device, non_blocking=True)
    ocnli_weights = torch.tensor(label_weights_dict['OCNLI']).to(
        device, non_blocking=True)
    ocemotion_weights = torch.tensor(label_weights_dict['OCEMOTION']).to(
        device, non_blocking=True)
    loss_object = Calculate_loss(label_dict,
                                 weighted=weighted_loss,
                                 tnews_weights=tnews_weights,
                                 ocnli_weights=ocnli_weights,
                                 ocemotion_weights=ocemotion_weights)
    optimizer = torch.optim.Adam(my_net.parameters(), lr=lr)
    best_dev_f1 = 0.0
    best_epoch = -1
    for epoch in range(epochs):
        # ---------------- training pass ----------------
        my_net.train()
        train_loss = 0.0
        train_total = 0
        train_correct = 0
        train_ocnli_correct = 0
        train_ocemotion_correct = 0
        train_tnews_correct = 0
        train_ocnli_pred_list = []
        train_ocnli_gold_list = []
        train_ocemotion_pred_list = []
        train_ocemotion_gold_list = []
        train_tnews_pred_list = []
        train_tnews_gold_list = []
        cnt_train = 0
        while True:
            raw_data = train_data_generator.get_next_batch(batchSize)
            if raw_data is None:
                break
            data, tnews_gold, ocnli_gold, ocemotion_gold = _unpack_batch(
                raw_data)
            if not accumulate:
                optimizer.zero_grad()
            ocnli_pred, ocemotion_pred, tnews_pred = my_net(**data)
            if use_dtp:
                tnews_kpi = _running_kpi(train_tnews_correct,
                                         train_tnews_pred_list)
                ocnli_kpi = _running_kpi(train_ocnli_correct,
                                         train_ocnli_pred_list)
                ocemotion_kpi = _running_kpi(train_ocemotion_correct,
                                             train_ocemotion_pred_list)
                current_loss = loss_object.compute_dtp(tnews_pred, ocnli_pred,
                                                       ocemotion_pred,
                                                       tnews_gold, ocnli_gold,
                                                       ocemotion_gold,
                                                       tnews_kpi, ocnli_kpi,
                                                       ocemotion_kpi)
            else:
                current_loss = loss_object.compute(tnews_pred, ocnli_pred,
                                                   ocemotion_pred, tnews_gold,
                                                   ocnli_gold, ocemotion_gold)
            train_loss += current_loss.item()
            current_loss.backward()
            # Gradient accumulation: step every a_step batches, else per batch.
            if accumulate and (cnt_train + 1) % a_step == 0:
                optimizer.step()
                optimizer.zero_grad()
            if not accumulate:
                optimizer.step()
            if use_dtp:
                (good_tnews_nb, good_ocnli_nb, good_ocemotion_nb,
                 total_tnews_nb, total_ocnli_nb,
                 total_ocemotion_nb) = loss_object.correct_cnt_each(
                     tnews_pred, ocnli_pred, ocemotion_pred, tnews_gold,
                     ocnli_gold, ocemotion_gold)
                tmp_good = sum(
                    [good_tnews_nb, good_ocnli_nb, good_ocemotion_nb])
                tmp_total = sum(
                    [total_tnews_nb, total_ocnli_nb, total_ocemotion_nb])
                train_ocemotion_correct += good_ocemotion_nb
                train_ocnli_correct += good_ocnli_nb
                train_tnews_correct += good_tnews_nb
            else:
                tmp_good, tmp_total = loss_object.correct_cnt(
                    tnews_pred, ocnli_pred, ocemotion_pred, tnews_gold,
                    ocnli_gold, ocemotion_gold)
            train_correct += tmp_good
            train_total += tmp_total
            p, g = loss_object.collect_pred_and_gold(ocnli_pred, ocnli_gold)
            train_ocnli_pred_list += p
            train_ocnli_gold_list += g
            p, g = loss_object.collect_pred_and_gold(ocemotion_pred,
                                                     ocemotion_gold)
            train_ocemotion_pred_list += p
            train_ocemotion_gold_list += g
            p, g = loss_object.collect_pred_and_gold(tnews_pred, tnews_gold)
            train_tnews_pred_list += p
            train_tnews_gold_list += g
            cnt_train += 1
            if (cnt_train + 1) % 1000 == 0:
                print('[', cnt_train + 1, '- th batch : train acc is:',
                      train_correct / train_total, '; train loss is:',
                      train_loss / cnt_train, ']')
        # Flush any gradients left over from an incomplete accumulation window.
        if accumulate:
            optimizer.step()
        optimizer.zero_grad()
        train_ocnli_f1 = get_f1(train_ocnli_gold_list, train_ocnli_pred_list)
        train_ocemotion_f1 = get_f1(train_ocemotion_gold_list,
                                    train_ocemotion_pred_list)
        train_tnews_f1 = get_f1(train_tnews_gold_list, train_tnews_pred_list)
        train_avg_f1 = (train_ocnli_f1 + train_ocemotion_f1 +
                        train_tnews_f1) / 3
        print(epoch, 'th epoch train average f1 is:', train_avg_f1)
        print(epoch, 'th epoch train ocnli is below:')
        print_result(train_ocnli_gold_list, train_ocnli_pred_list)
        print(epoch, 'th epoch train ocemotion is below:')
        print_result(train_ocemotion_gold_list, train_ocemotion_pred_list)
        print(epoch, 'th epoch train tnews is below:')
        print_result(train_tnews_gold_list, train_tnews_pred_list)

        train_data_generator.reset()

        # ---------------- evaluation pass ----------------
        my_net.eval()
        dev_loss = 0.0
        dev_total = 0
        dev_correct = 0
        dev_ocnli_correct = 0
        dev_ocemotion_correct = 0
        dev_tnews_correct = 0
        dev_ocnli_pred_list = []
        dev_ocnli_gold_list = []
        dev_ocemotion_pred_list = []
        dev_ocemotion_gold_list = []
        dev_tnews_pred_list = []
        dev_tnews_gold_list = []
        cnt_dev = 0
        with torch.no_grad():
            while True:
                raw_data = dev_data_generator.get_next_batch(batchSize)
                if raw_data is None:
                    break
                data, tnews_gold, ocnli_gold, ocemotion_gold = _unpack_batch(
                    raw_data)
                ocnli_pred, ocemotion_pred, tnews_pred = my_net(**data)
                if use_dtp:
                    tnews_kpi = _running_kpi(dev_tnews_correct,
                                             dev_tnews_pred_list)
                    ocnli_kpi = _running_kpi(dev_ocnli_correct,
                                             dev_ocnli_pred_list)
                    ocemotion_kpi = _running_kpi(dev_ocemotion_correct,
                                                 dev_ocemotion_pred_list)
                    current_loss = loss_object.compute_dtp(
                        tnews_pred, ocnli_pred, ocemotion_pred, tnews_gold,
                        ocnli_gold, ocemotion_gold, tnews_kpi, ocnli_kpi,
                        ocemotion_kpi)
                else:
                    current_loss = loss_object.compute(tnews_pred, ocnli_pred,
                                                       ocemotion_pred,
                                                       tnews_gold, ocnli_gold,
                                                       ocemotion_gold)
                dev_loss += current_loss.item()
                if use_dtp:
                    (good_tnews_nb, good_ocnli_nb, good_ocemotion_nb,
                     total_tnews_nb, total_ocnli_nb,
                     total_ocemotion_nb) = loss_object.correct_cnt_each(
                         tnews_pred, ocnli_pred, ocemotion_pred, tnews_gold,
                         ocnli_gold, ocemotion_gold)
                    # BUG FIX: the original used "+=" here, carrying over
                    # whatever tmp_good/tmp_total held from the previous
                    # batch (or the train loop), which inflated dev accuracy.
                    tmp_good = sum(
                        [good_tnews_nb, good_ocnli_nb, good_ocemotion_nb])
                    tmp_total = sum(
                        [total_tnews_nb, total_ocnli_nb, total_ocemotion_nb])
                    dev_ocemotion_correct += good_ocemotion_nb
                    dev_ocnli_correct += good_ocnli_nb
                    dev_tnews_correct += good_tnews_nb
                else:
                    tmp_good, tmp_total = loss_object.correct_cnt(
                        tnews_pred, ocnli_pred, ocemotion_pred, tnews_gold,
                        ocnli_gold, ocemotion_gold)
                dev_correct += tmp_good
                dev_total += tmp_total
                p, g = loss_object.collect_pred_and_gold(
                    ocnli_pred, ocnli_gold)
                dev_ocnli_pred_list += p
                dev_ocnli_gold_list += g
                p, g = loss_object.collect_pred_and_gold(
                    ocemotion_pred, ocemotion_gold)
                dev_ocemotion_pred_list += p
                dev_ocemotion_gold_list += g
                p, g = loss_object.collect_pred_and_gold(
                    tnews_pred, tnews_gold)
                dev_tnews_pred_list += p
                dev_tnews_gold_list += g
                cnt_dev += 1
            dev_ocnli_f1 = get_f1(dev_ocnli_gold_list, dev_ocnli_pred_list)
            dev_ocemotion_f1 = get_f1(dev_ocemotion_gold_list,
                                      dev_ocemotion_pred_list)
            dev_tnews_f1 = get_f1(dev_tnews_gold_list, dev_tnews_pred_list)
            dev_avg_f1 = (dev_ocnli_f1 + dev_ocemotion_f1 + dev_tnews_f1) / 3
            print(epoch, 'th epoch dev average f1 is:', dev_avg_f1)
            print(epoch, 'th epoch dev ocnli is below:')
            print_result(dev_ocnli_gold_list, dev_ocnli_pred_list)
            print(epoch, 'th epoch dev ocemotion is below:')
            print_result(dev_ocemotion_gold_list, dev_ocemotion_pred_list)
            print(epoch, 'th epoch dev tnews is below:')
            print_result(dev_tnews_gold_list, dev_tnews_pred_list)

            dev_data_generator.reset()

            # Checkpoint on dev improvement only.
            if dev_avg_f1 > best_dev_f1:
                best_dev_f1 = dev_avg_f1
                best_epoch = epoch
                torch.save(my_net, file_path)
            print('best epoch is:', best_epoch, '; with best f1 is:',
                  best_dev_f1)
示例#7
0
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer uses the registry defaults unchanged."""
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed uses the registry defaults unchanged."""
    return args


def run(dataset_name):
    """Run the node-classification task once per configured seed."""
    args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "unet")
示例#8
0
    if i <= 0:
        continue
    svm_clf = LinearSVC(C=i)
    svm_clf.fit(x_train, y_train)
    predicted = svm_clf.predict(x_valid)
    svm_acc = np.mean(predicted == y_valid)
    acc_dict[i] = svm_acc
    print("C {:.2f}: {:g}".format(i, svm_acc))

print(acc_dict)
print("")

# Refit on train+valid combined, using the C that scored best on validation.
x_train = vstack((x_train, x_valid))
y_train = np.concatenate((y_train, y_valid), axis=0)
max_C = max(acc_dict, key=acc_dict.get)

print("x_train: {}".format(x_train.shape))
print("y_train: {}".format(y_train.shape))

clf = LinearSVC(C=max_C)
clf.fit(x_train, y_train)
preds = clf.predict(x_test)
svm_acc = np.mean(preds == y_test)

utils.print_result(args.dataset,
                   "linear_svc",
                   svm_acc,
                   data_str,
                   str(int(time.time())),
                   hyperparams="{{C: {}}}".format(max_C))
示例#9
0
            if required_args is None:
                return {'error': 'Failed to prompt for arguments'}

            optional_args = prompt_args( method_info['opts'], lambda arghelp, argname: raw_input("optional: %s ('%s'): " % (arghelp, argname) ))
            if optional_args is None:
                return {'error': 'Failed to prompt for arguments'}

            full_args = [method_info['command']] + required_args + optional_args
            try:
                args, unknown_args = parser.parse_known_args( args=full_args )
            except SystemExit:
                # invalid arguments
                return {'error': 'Invalid arguments.  Please try again.'}

        result = method( args, config_path=config_path )
        return result

    # not found 
    return {'error': "No such command '%s'" % args.action}


if __name__ == '__main__':
    result = run_cli()
    if 'error' in result:
        exit_with_error(result['error'])
    else:
        # Success path: show the command's result and exit cleanly.
        print_result(result)
        sys.exit(0)


示例#10
0
def citeseer_config(args):
    """Citeseer override: stronger L2 regularisation than the defaults."""
    args.weight_decay = 0.001
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed override: stronger L2 regularisation than the defaults."""
    args.weight_decay = 0.001
    return args


def run(dataset_name):
    """Run the node-classification task once per configured seed."""
    args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "gat")
示例#11
0
File: sign.py  Project: zhjhr181/cogdl
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer uses the registry defaults unchanged."""
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed uses the registry defaults unchanged."""
    return args


def run(dataset_name):
    """Run the node-classification task once per configured seed."""
    args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "sign")
示例#12
0
File: dgi.py  Project: zhjhr181/cogdl
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer uses the registry defaults unchanged."""
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed override: force CPU execution (presumably a memory
    workaround — unverified)."""
    args.cpu = True
    return args


def run(dataset_name):
    """Run the node-classification task once per configured seed."""
    args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "dgi")
#############################
#          NCG FR
#############################
beta_method = "fr"
c1 = 1e-4
c2 = .3
restart = 3
ln_maxiter = 100
#############################
# Conjugate gradient with Fletcher-Reeves beta; shared tolerances come
# from the surrounding script (ng_eps, l_eps, max_iter, ...).
optimizer = NCG(beta_method=beta_method, c1=c1, c2=c2, restart=restart,
                ln_maxiter=ln_maxiter, norm_g_eps=ng_eps, l_eps=l_eps)
model_history, opt_history, elapsed = optimize_monk_f(monk=monk,
                                                      reg=reg,
                                                      seed=seed,
                                                      optimizer=optimizer,
                                                      max_iter=max_iter,
                                                      verbose=verbose)
print(" - NCG FR -")
print_result(f="Monk"+monk, opt=("NCG "+beta_method).upper(), c1=c1, c2=c2, r=restart, m="-", history=model_history, opt_history=opt_history, time=elapsed, latex=True)
print_ls_result(ls_max_iter=ln_maxiter, opt_history=opt_history, latex=True)
f_fr = model_history["loss_mse_reg"]
p_fr = rate(model_history["loss_mse_reg"])
save_csv(path="./experiments/methods_comparisons/results/monk{}/m{}-{}_fr.csv".format(monk, monk, seed), f="loss_mse_reg", model_history=model_history, opt_history=opt_history)


#############################
#          NCG PR+
#############################
beta_method = "pr+"
c1 = 1e-4
c2 = .4
restart = None
ln_maxiter = 100
#############################
示例#14
0
File: sgcpn.py  Project: zhjhr181/cogdl

def run(dataset_name, missing_rate=0, num_layers=40):
    """Run node classification at a given feature missing rate and depth."""
    args = build_default_args_for_node_classification(
        dataset_name, missing_rate=missing_rate, num_layers=num_layers)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Per-dataset (name, missing_rate, num_layers) presets.  Only the
    # 0%-missing set is actually run below; the others are kept as
    # reference configurations for the higher missing rates.
    params0 = [("cora", 0, 7), ("citeseer", 0, 3), ("pubmed", 0, 9)]
    params20 = [("cora", 20, 7), ("citeseer", 20, 3), ("pubmed", 20, 7)]
    params40 = [("cora", 40, 7), ("citeseer", 40, 4), ("pubmed", 40, 60)]
    params60 = [("cora", 60, 20), ("citeseer", 60, 5), ("pubmed", 60, 7)]
    params80 = [("cora", 80, 25), ("citeseer", 80, 50), ("pubmed", 80, 60)]
    params100 = [("cora", 100, 40), ("citeseer", 100, 50), ("pubmed", 100, 40)]
    results = []
    for name, rate, layers in params0:
        results += run(dataset_name=name,
                       missing_rate=rate,
                       num_layers=layers)
    print_result(results, datasets, "sgcpn")
示例#15
0
        computer.output = []
        if not current_loc in panels:
            panels[current_loc] = 0
        i += 1
    return panels, current_loc, facing, i


def painted_image(panels):
    """Render painted panels as text: full block for white, '.' otherwise.

    *panels* maps (x, y) coordinates to a truthy colour value.  Rows are
    emitted top-down (largest y first) and cells are space-separated.
    """
    xs = [k[0] for k in panels]
    ys = [k[1] for k in panels]
    x_min, x_max = min(xs), max(xs)
    y_min, y_max = min(ys), max(ys)
    # Grid covering the painted bounding box, initially all black.
    picture = [['.'] * (x_max - x_min + 1) for _ in range(y_max - y_min + 1)]
    for (px, py), colour in panels.items():
        picture[y_max - py][px - x_min] = '\u2588' if colour else '.'
    return '\n'.join(' '.join(row) for row in picture)


# Day 11: run the painting robot, then render the registration identifier.
raw_lines = utils.get_input(11)
program = list(map(int, raw_lines[0].split(',')))

computer = Intcode(program)

first_star = len(operate_robot(computer, start_colour=0)[0])
computer.reset()
second_star = '\n' + painted_image(operate_robot(computer, start_colour=1)[0])
utils.print_result(first_star, second_star)
示例#16
0
File: sagpool.py  Project: xssstory/cogdl
@register_func("proteins")
def proteins_config(args):
    """PROTEINS uses the registry defaults unchanged."""
    return args


@register_func("collab")
def collab_config(args):
    """COLLAB override: enable the ``degree_feature`` flag."""
    args.degree_feature = True
    return args


def run(dataset_name):
    """Run the graph-classification task once per configured seed."""
    args = build_default_args_for_graph_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["mutag", "imdb-b", "imdb-m", "proteins", "collab"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "sagpool")
示例#17
0
@register_func("proteins")
def proteins_config(args):
    """PROTEINS uses the registry defaults unchanged."""
    return args


@register_func("collab")
def collab_config(args):
    """COLLAB override: enable the ``degree_feature`` flag."""
    args.degree_feature = True
    return args


def run(dataset_name):
    """Run the graph-classification task once per configured seed."""
    args = build_default_args_for_graph_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["mutag", "imdb-b", "imdb-m", "proteins", "collab"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "hgpsl")
示例#18
0
File: sortpool.py  Project: zrt/cogdl
@register_func("proteins")
def proteins_config(args):
    """PROTEINS uses the registry defaults unchanged."""
    return args


@register_func("collab")
def collab_config(args):
    """COLLAB override: enable the ``degree_feature`` flag."""
    args.degree_feature = True
    return args


def run(dataset_name):
    """Run the graph-classification task once per configured seed."""
    args = build_default_args_for_graph_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    per_seed = []
    for s in args.seed:
        set_random_seed(s)
        per_seed.append(build_task(args, dataset=dataset).train())
    return per_seed


if __name__ == "__main__":
    datasets = ["mutag", "imdb-b", "imdb-m", "proteins", "collab"]
    # Flatten per-seed results across all datasets into a single list.
    results = [r for name in datasets for r in run(name)]
    print_result(results, datasets, "sortpool")
def train(args, train_dataset, model):
    """Train the baseball transformer model.

    Picks a sampling strategy from ``args.sample_criteria``, builds the
    Adam optimizer and a cosine-annealing LR schedule (optionally with
    gradual warmup and apex fp16), then runs the epoch loop with periodic
    evaluation, best-model tracking and checkpointing.

    Args:
        args: run configuration (batch size, epochs, learning rate, warmup
            percent, logging/save intervals, output dirs, fp16 flags, ...).
        train_dataset: training set; must expose ``pitch_counts``,
            ``label_counts``, ``pitch_and_label_count``, ``origin_pitch``
            and ``label`` for the weighted samplers.
        model: the network to optimize (weights are updated in place).

    Returns:
        Tuple ``(global_step, mean training loss per optimization step)``.
    """
    tb_writer = SummaryWriter()
    result_writer = ResultWriter(args.eval_results_dir)

    # Configure how training batches are sampled.
    if args.sample_criteria is None:
        # Plain random sampling: keep the dataset's natural
        # positive/negative and pitch-type ratios.
        sampler = RandomSampler(train_dataset)
    else:
        # Equalize the sampling frequency across the 7 pitch types.
        if args.sample_criteria == "pitcher":
            counts = train_dataset.pitch_counts
            logger.info("  Counts of each ball type : %s", counts)
            # origin_pitch rows are one-hot; recover the index of the 1.
            pitch_contiguous = [
                i for p in train_dataset.origin_pitch for i, j in enumerate(p)
                if j == 1
            ]
            # Types 5 and 6 are excluded (weight 0); the rest are weighted
            # inversely to their frequency so each type is drawn equally.
            weights = [
                0 if p == 5 or p == 6 else 1.0 / counts[p]
                for p in pitch_contiguous
            ]
            sampler = WeightedRandomSampler(weights,
                                            len(train_dataset),
                                            replacement=True)
        # Equalize across the 2 sentiment labels (positive/negative).
        elif args.sample_criteria == "batter":
            counts = train_dataset.label_counts
            logger.info("  Counts of each label type : %s", counts)
            weights = [1.0 / counts[l] for l in train_dataset.label]
            sampler = WeightedRandomSampler(weights,
                                            len(train_dataset),
                                            replacement=True)
        # Equalize across all 14 (pitch type x label) combinations.
        elif args.sample_criteria == "both":
            counts = train_dataset.pitch_and_label_count
            logger.info("  Counts of each both type : %s", counts)
            pitch_contiguous = [
                i for p in train_dataset.origin_pitch for i, j in enumerate(p)
                if j == 1
            ]
            weights = [
                0 if p == 5 or p == 6 else 1.0 / counts[(p, l)]
                for p, l in zip(pitch_contiguous, train_dataset.label)
            ]
            sampler = WeightedRandomSampler(weights,
                                            len(train_dataset),
                                            replacement=True)
        else:
            # Unknown criterion: fall back to plain random sampling.
            sampler = RandomSampler(train_dataset)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=args.train_batch_size,
        sampler=sampler,
    )
    t_total = len(train_dataloader) * args.num_train_epochs
    args.warmup_step = int(args.warmup_percent * t_total)

    # Prepare optimizer and schedule (linear warmup and decay).
    # Biases and layer-norm weights are excluded from weight decay.
    no_decay = [
        "bias",
        "layernorm.weight",
    ]  # LayerNorm.weight -> layernorm.weight (model_parameter name)
    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters()
                if not any(nd in n for nd in no_decay)
            ],
            "weight_decay":
            args.weight_decay,
        },
        {
            "params": [
                p for n, p in model.named_parameters()
                if any(nd in n for nd in no_decay)
            ],
            "weight_decay":
            0.0,
        },
    ]

    optimizer = optim.Adam(optimizer_grouped_parameters,
                           lr=args.learning_rate,
                           eps=args.adam_epsilon)
    if args.warmup_step != 0:
        # Warm up linearly to the base LR, then hand off to cosine annealing.
        scheduler_cosine = CosineAnnealingLR(optimizer, t_total)
        scheduler = GradualWarmupScheduler(optimizer,
                                           1,
                                           args.warmup_step,
                                           after_scheduler=scheduler_cosine)
    else:
        scheduler = CosineAnnealingLR(optimizer, t_total)

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
            )
        model, optimizer = amp.initialize(model,
                                          optimizer,
                                          opt_level=args.fp16_opt_level)
    # The model emits raw logits; apply a sigmoid before the BCE loss.
    m = torch.nn.Sigmoid()
    loss_fct = torch.nn.BCELoss()

    # Train!
    logger.info("***** Running Baseball Transformer *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Warmup Steps = %d", args.warmup_step)
    logger.info("  Instantaneous batch size per GPU = %d",
                args.train_batch_size)
    logger.info("  Total train batch size = %d", args.train_batch_size)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    epochs_trained = 0
    tr_loss, logging_loss = 0.0, 0.0

    best_pitch_micro_f1, best_pitch_macro_f1 = 0, 0
    best_loss = 1e10

    model.zero_grad()
    train_iterator = trange(
        epochs_trained,
        int(args.num_train_epochs),
        desc="Epoch",
    )
    set_seed(args)  # Added here for reproducibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration")
        for step, batch in enumerate(epoch_iterator):

            # Move every tensor in the batch to the target device.
            (
                pitcher_discrete,
                pitcher_continuous,
                batter_discrete,
                batter_continuous,
                state_discrete,
                state_continuous,
                pitch,
                hit,
                label,
                masked_pitch,
                origin_pitch,
            ) = list(map(lambda x: x.to(args.device), batch))
            model.train()

            # sentiment input
            pitching_score = model(
                pitcher_discrete,
                pitcher_continuous,
                batter_discrete,
                batter_continuous,
                state_discrete,
                state_continuous,
                label,
                args.concat if args.concat else 0,
            )

            pitching_score = pitching_score.contiguous()
            pitch = pitch.contiguous()
            # BCE over sigmoid-activated logits.
            loss = loss_fct(m(pitching_score), pitch)

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()

            # Clip gradients (on the amp master params when using fp16).
            if args.fp16:
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer),
                                               args.max_grad_norm)
            else:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               args.max_grad_norm)
            optimizer.step()
            scheduler.step()
            model.zero_grad()
            global_step += 1

            if args.logging_steps > 0 and global_step % args.logging_steps == 0:
                # Log metrics
                if args.evaluate_during_training:
                    results, f1_results, f1_log, cm_pos, cm_neg = evaluate(
                        args, args.eval_data_file, model)
                    output_eval_file = os.path.join(args.output_dir,
                                                    "eval_results_pos.txt")
                    print_result(output_eval_file, results, f1_log, cm_pos)

                    for key, value in results.items():
                        tb_writer.add_scalar("eval_{}".format(key), value,
                                             global_step)

                tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                tb_writer.add_scalar("loss", (tr_loss - logging_loss) /
                                     args.logging_steps, global_step)
                logging_loss = tr_loss
                # BUGFIX: `results` (and f1_log/cm_pos below) only exist when
                # evaluation ran above; without this guard a run with
                # evaluate_during_training disabled crashed with a NameError.
                if args.evaluate_during_training and best_loss > results["loss"]:
                    best_pitch_micro_f1 = results["pitch_micro_f1"]
                    best_pitch_macro_f1 = results["pitch_macro_f1"]
                    best_loss = results["loss"]

                    # Persist the best model (by eval loss) seen so far.
                    output_dir = os.path.join(args.output_dir, "best_model/")
                    os.makedirs(output_dir, exist_ok=True)
                    torch.save(model.state_dict(),
                               os.path.join(output_dir, "pytorch_model.bin"))
                    torch.save(args,
                               os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving best model to %s", output_dir)

                    result_path = os.path.join(output_dir, "best_results.txt")
                    print_result(result_path,
                                 results,
                                 f1_log,
                                 cm_pos,
                                 off_logger=True)

                    results.update(dict(f1_results))
                    result_writer.update(args, **results)

                logger.info("  best pitch micro f1 : %s", best_pitch_micro_f1)
                logger.info("  best pitch macro f1 : %s", best_pitch_macro_f1)
                logger.info("  best loss : %s", best_loss)

            if args.save_steps > 0 and global_step % args.save_steps == 0:
                checkpoint_prefix = "checkpoint"
                # Save model checkpoint
                output_dir = os.path.join(
                    args.output_dir, "{}-{}".format(checkpoint_prefix,
                                                    global_step))
                os.makedirs(output_dir, exist_ok=True)
                torch.save(model.state_dict(),
                           os.path.join(output_dir, "pytorch_model.bin"))
                torch.save(args, os.path.join(output_dir, "training_args.bin"))
                logger.info("Saving model checkpoint to %s", output_dir)

                # Keep only the most recent checkpoints.
                rotate_checkpoints(args, checkpoint_prefix)

                torch.save(optimizer.state_dict(),
                           os.path.join(output_dir, "optimizer.pt"))
                torch.save(scheduler.state_dict(),
                           os.path.join(output_dir, "scheduler.pt"))
                logger.info("Saving optimizer and scheduler states to %s",
                            output_dir)

    tb_writer.close()

    # Guard against an empty dataloader so we never divide by zero.
    return global_step, tr_loss / max(global_step, 1)
示例#20
0
            if optional_args is None:
                return {'error': 'Failed to prompt for arguments'}

            full_args = [method_info['command']
                         ] + required_args + optional_args
            try:
                args, unknown_args = parser.parse_known_args(args=full_args)
            except SystemExit:
                # invalid arguments
                return {'error': 'Invalid arguments.  Please try again.'}

        result = method(args, config_path=config_path)
        return {'status': True, 'result': result, 'pragmas': pragmas}

    # not found
    return {'error': 'No such command "{}"'.format(args.action)}


if __name__ == '__main__':
    outcome = run_cli()
    if 'error' in outcome:
        exit_with_error(outcome['error'])
    else:
        # The 'raw' pragma requests verbatim output; otherwise pretty-print.
        emit = print if 'raw' in outcome['pragmas'] else print_result
        emit(outcome['result'])
        sys.exit(0)
示例#21
0
@register_func("pubmed")
def pubmed_config(args):
    """Pubmed: the default arguments need no overrides."""
    return args


def run(dataset_name, unsup=False):
    """Run node classification on *dataset_name* once per configured seed.

    Args:
        dataset_name: registered dataset key.
        unsup: select the unsupervised default configuration instead of
            the supervised one. Previously a hard-coded local flag; now a
            parameter (default ``False`` preserves the old behavior).

    Returns:
        List of per-seed training results.
    """
    if unsup:
        args = build_default_args_for_unsupervised_node_classification(
            dataset_name)
    else:
        args = build_default_args_for_node_classification(dataset_name)
    args = DATASET_REGISTRY[dataset_name](args)
    dataset, args = get_dataset(args)
    results = []
    for seed in args.seed:
        # Seed every RNG so each run is reproducible.
        set_random_seed(seed)
        task = build_task(args, dataset=dataset)
        results.append(task.train())
    return results


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten each dataset's per-seed results into one list for the summary.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "graphsage")
示例#22
0
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer override: use dropout 0.6."""
    setattr(args, "dropout", 0.6)
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed: defaults are used unchanged."""
    return args


def run(dataset_name):
    """Train node classification on *dataset_name* once per seed.

    Returns the list of per-seed training results.
    """
    cfg = build_default_args_for_node_classification(dataset_name)
    cfg = DATASET_REGISTRY[dataset_name](cfg)
    dataset, cfg = get_dataset(cfg)

    def _one_run(seed):
        # Reset all RNGs for reproducibility.
        set_random_seed(seed)
        return build_task(cfg, dataset=dataset).train()

    return [_one_run(seed) for seed in cfg.seed]


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Collect per-seed results for each dataset into a single flat list.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "srgcn")
示例#23
0
文件: pprgo.py 项目: zhjhr181/cogdl
@register_func("reddit")
def reddit_config(args):
    """Reddit: the default arguments need no overrides."""
    return args


@register_func("ogbn-product")
def products_config(args):
    """ogbn-product: the default arguments need no overrides."""
    return args


def run(dataset_name):
    """Run node classification on *dataset_name*, once per configured seed.

    Returns the list of per-seed training results.
    """
    cfg = build_default_args_for_node_classification(dataset_name)
    cfg = DATASET_REGISTRY[dataset_name](cfg)
    dataset, cfg = get_dataset(cfg)

    def _train_once(seed):
        # Seed all RNGs so the run is reproducible.
        set_random_seed(seed)
        return build_task(cfg, dataset=dataset).train()

    return [_train_once(s) for s in cfg.seed]


if __name__ == "__main__":
    datasets = ["pubmed", "reddit"]
    # Flatten per-dataset result lists into one list for the summary.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "pprgo")

# Training
# ==================================================

# One timestamp shared by all models trained in this run, used to tag results.
timestamp = str(int(time.time()))

# Linear Support Vector Classifier
if args.model == "all" or args.model == "linear_svc":
    svm_clf = LinearSVC(C=args.C)
    start = time.time()
    svm_clf.fit(x_train, y_train)
    train_time = time.time() - start
    predicted = svm_clf.predict(x_test)
    # Accuracy = fraction of test samples classified correctly.
    svm_acc = np.mean(predicted == y_test)
    utils.print_result(args.dataset, "linear_svc", svm_acc, data_str, timestamp,
                       hyperparams="{{C: {}}}".format(args.C))
    print("Time Taken: {:g}".format(train_time))

# Multinomial Naive Bayes Classifier
if args.model == "all" or args.model == "multinomial_nb":
    bayes_clf = MultinomialNB(alpha=args.alpha)
    start = time.time()
    bayes_clf.fit(x_train, y_train)
    train_time = time.time() - start
    predicted = bayes_clf.predict(x_test)
    # Accuracy = fraction of test samples classified correctly.
    bayes_acc = np.mean(predicted == y_test)
    utils.print_result(args.dataset, "multinomial_nb", bayes_acc, data_str, timestamp,
                       hyperparams="{{alpha: {}}}".format(args.alpha))
    print("Time Taken: {:g}".format(train_time))

# Save models as pickles
示例#25
0
def pubmed_config(args):
    """Apply the GRACE hyper-parameter overrides for Pubmed to *args*."""
    overrides = {
        "hidden_size": 256,
        "proj_hidden_size": 256,
        "drop_edge_rates": [0.4, 0.1],
        "drop_feature_rates": [0.0, 0.2],
        "tau": 0.7,
        "lr": 0.001,
        "weight_decay": 0.00001,
    }
    for key, value in overrides.items():
        setattr(args, key, value)
    return args


def run(dataset_name):
    """Train node classification on *dataset_name* for every configured seed.

    Returns the list of per-seed training results.
    """
    cfg = build_default_args_for_node_classification(dataset_name)
    cfg = DATASET_REGISTRY[dataset_name](cfg)
    dataset, cfg = get_dataset(cfg)

    def _one_seed(seed):
        # Reset all RNGs so the run is reproducible.
        set_random_seed(seed)
        return build_task(cfg, dataset=dataset).train()

    return [_one_seed(seed) for seed in cfg.seed]


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Run every dataset and flatten the per-seed results for the summary.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "grace")
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        # Init model
        cnn = FCholletCNN(sequence_length=seq_len,
                          num_classes=len(train.class_names),
                          vocab_size=len(train.vocab),
                          embedding_size=embedding_dim,
                          embeddings=embeddings,
                          filter_widths=filter_widths,
                          num_features=num_features,
                          pooling_sizes=pooling_sizes,
                          fc_layers=fc_layers,
                          l2_reg_lambda=l2_reg_lambda)

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
        out_dir = os.path.abspath(
            os.path.join(os.path.curdir, "runs", args.dataset, model_name,
                         timestamp))

        # Train and test model
        max_accuracy = train_and_test(sess, cnn, x_train, y_train, x_test,
                                      y_test, learning_rate, batch_size,
                                      num_epochs, dropout_keep_prob, out_dir)

        # Output for results.csv
        hyperparams = "{{filter_widths: {}, num_features: {}, pooling_sizes: {}, fc_layers: {}}}".format(
            filter_widths, num_features, pooling_sizes, fc_layers)
        utils.print_result(args.dataset, model_name, max_accuracy, data_str,
                           timestamp, hyperparams, args, args.notes)
示例#27
0
    def test(self, epoch):
        """Evaluate all network segments on the test set for one epoch.

        Puts every segment and its auxiliary classifier into eval mode,
        accumulates per-segment top-1 error, top-5 error and loss over
        ``self.test_loader``, prints per-iteration results, and logs the
        epoch averages (with segment indices reversed; see note below).

        Returns:
            (top1_error, top1_loss, top5_error) -- numpy arrays of
            per-segment averages over the test batches.
        """
        top1_error = np.zeros(len(self.segments))
        top5_error = np.zeros(len(self.segments))
        top1_loss = np.zeros(len(self.segments))

        # Switch every segment and its auxiliary classifier to eval mode.
        for i in range(len(self.segments)):
            self.segments[i].eval()
            self.auxfc[i].eval()
        iters = len(self.test_loader)

        start_time = time.time()
        end_time = start_time
        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input to different gpu,
            # instead of send it to the master gpu.
            if self.settings.nGPU == 1:
                images = images.cuda()
            # NOTE(review): Variable(..., volatile=True) is legacy pre-0.4
            # PyTorch for gradient-free inference; modern code would use
            # torch.no_grad().
            images_var = Variable(images, volatile=True)
            labels = labels.cuda()
            labels_var = Variable(labels, volatile=True)

            # forward
            outputs, losses = self.forward(images_var, labels_var)
            # print len(outputs), len(losses)

            # compute loss and error rate
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=outputs,
                labels=labels_var,
                loss=losses,
                top5_flag=True,
                mean_flag=True)

            # Accumulate per-segment sums; averaged over `iters` below.
            top1_loss += single_loss
            top1_error += single_error
            top5_error += single5_error

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(
                epoch,
                self.settings.nEpochs,
                i + 1,
                iters,
                self.lr_master.lr,
                data_time,
                iter_time,
                single_error,
                single_loss,
                mode="Test",
            )

        # Convert accumulated sums into per-batch averages.
        top1_loss /= iters
        top1_error /= iters
        top5_error /= iters
        """
        warning: for better comparison, we inverse the index of data
        """
        if self.logger is not None:
            length = len(top1_error) - 1
            for i, item in enumerate(top1_error):
                self.logger.scalar_summary("test_top1_error_%d" % (length - i),
                                           item, self.run_count)
                self.logger.scalar_summary("test_top5_error_%d" % (length - i),
                                           top5_error[i], self.run_count)
                self.logger.scalar_summary("test_loss_%d" % (length - i),
                                           top1_loss[i], self.run_count)
        self.run_count += 1

        print "|===>Testing Error: %.4f Loss: %.4f" % (top1_error[-1],
                                                       top1_loss[-1])
        return top1_error, top1_loss, top5_error
示例#28
0
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer: the default arguments need no overrides."""
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed: the default arguments need no overrides."""
    return args


def run(dataset_name):
    """Train node classification on *dataset_name*, once per seed.

    Returns the list of per-seed training results.
    """
    cfg = build_default_args_for_node_classification(dataset_name)
    cfg = DATASET_REGISTRY[dataset_name](cfg)
    dataset, cfg = get_dataset(cfg)

    def _train_once(seed):
        # Fix all RNGs so each seed gives a reproducible run.
        set_random_seed(seed)
        return build_task(cfg, dataset=dataset).train()

    return [_train_once(s) for s in cfg.seed]


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Flatten per-dataset, per-seed results into one list for the summary.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "mixhop")
示例#29
0
@register_func("citeseer")
def citeseer_config(args):
    """Citeseer: defaults are used unchanged."""
    return args


@register_func("pubmed")
def pubmed_config(args):
    """Pubmed: defaults are used unchanged."""
    return args


def run(dataset_name):
    """Run node classification on *dataset_name* once per configured seed.

    Returns the list of per-seed training results.
    """
    cfg = build_default_args_for_node_classification(dataset_name)
    cfg = DATASET_REGISTRY[dataset_name](cfg)
    dataset, cfg = get_dataset(cfg)

    def _one_run(seed):
        # Seed every RNG so the run is reproducible.
        set_random_seed(seed)
        return build_task(cfg, dataset=dataset).train()

    return [_one_run(seed) for seed in cfg.seed]


if __name__ == "__main__":
    datasets = ["cora", "citeseer", "pubmed"]
    # Run each dataset and flatten the per-seed results for the summary.
    results = [res for name in datasets for res in run(name)]
    print_result(results, datasets, "ppnp")
    def test(self, epoch):
        """Evaluate all network segments on the test set for one epoch.

        Runs every segment and auxiliary classifier in eval mode, updates
        per-segment AverageMeters for top-1/top-5 error and loss (weighted
        by batch size), prints per-iteration results, and logs the epoch
        averages (segment indices reversed; see note below).

        Returns:
            (top1_error_list, top1_loss_list, top5_error_list) as produced
            by ``self._convert_results``.
        """
        top1_error = []
        top5_error = []
        top1_loss = []
        num_segments = len(self.segments)
        # Switch to eval mode and create one AverageMeter per segment/metric.
        for i in range(num_segments):
            self.segments[i].eval()
            self.auxfc[i].eval()
            top1_error.append(utils.AverageMeter())
            top5_error.append(utils.AverageMeter())
            top1_loss.append(utils.AverageMeter())

        iters = len(self.test_loader)

        start_time = time.time()
        end_time = start_time
        for i, (images, labels) in enumerate(self.test_loader):
            start_time = time.time()
            data_time = start_time - end_time

            # if we use multi-gpu, its more efficient to send input to different gpu,
            # instead of send it to the master gpu.
            if self.settings.nGPU == 1:
                images = images.cuda()
            # NOTE(review): Variable(..., volatile=True) is legacy pre-0.4
            # PyTorch for gradient-free inference.
            images_var = Variable(images, volatile=True)
            labels = labels.cuda()
            labels_var = Variable(labels, volatile=True)

            # forward
            outputs, losses = self.forward(images_var, labels_var)
            # print len(outputs), len(losses)

            # compute loss and error rate
            single_error, single_loss, single5_error = utils.compute_singlecrop(
                outputs=outputs,
                labels=labels_var,
                loss=losses,
                top5_flag=True,
                mean_flag=True)

            # Update running averages, weighted by the actual batch size.
            for j in range(num_segments):
                top1_error[j].update(single_error[j], images.size(0))
                top5_error[j].update(single5_error[j], images.size(0))
                top1_loss[j].update(single_loss[j], images.size(0))

            end_time = time.time()
            iter_time = end_time - start_time

            utils.print_result(
                epoch,
                self.settings.nEpochs,
                i + 1,
                iters,
                self.lr_master.lr,
                data_time,
                iter_time,
                single_error,
                single_loss,
                mode="Test",
            )
        """
        warning: for better comparison, we inverse the index of data
        """
        top1_error_list, top1_loss_list, top5_error_list = self._convert_results(
            top1_error=top1_error, top1_loss=top1_loss, top5_error=top5_error)
        if self.logger is not None:
            length = num_segments - 1
            for i in range(num_segments):
                self.logger.scalar_summary("test_top1_error_%d" % (length - i),
                                           top1_error[i].avg, self.run_count)
                self.logger.scalar_summary("test_top5_error_%d" % (length - i),
                                           top5_error[i].avg, self.run_count)
                self.logger.scalar_summary("test_loss_%d" % (length - i),
                                           top1_loss[i].avg, self.run_count)
        self.run_count += 1

        print "|===>Testing Error: %.4f/%.4f, Loss: %.4f" % (
            top1_error[-1].avg, top5_error[-1].avg, top1_loss[-1].avg)
        return top1_error_list, top1_loss_list, top5_error_list
示例#31
0
#!/usr/bin/env python
#-*- encoding: utf-8 -*-

"""
Get all network info from nova-network through RPC call to make
sure it is health.
"""

import nova_utils
import utils

# Cache the debug flag once; it controls traceback printing on failure.
DEBUG = utils.get_debug()

# Assume success unless any step below raises.
result = 'success'
try:
    nova_utils.init_nova()
    network_info = nova_utils.rpccall_network('get_all_networks')
    assert network_info
    utils.log(network_info)
except Exception:
    if DEBUG:
        utils.print_traceback()
    result = 'failed'

utils.print_result(result)
示例#32
0
            lr_pretrained=optim_params["lr_pretrained"]),
                               weight_decay=optim_params["weight_decay"])
    elif "SGD" == optim_params["name"]:
        optimizer = optim.SGD(net.get_params_lr(
            lr_not_pretrained=optim_params["lr_not_pretrained"],
            lr_pretrained=optim_params["lr_pretrained"]),
                              momentum=optim_params["momentum"],
                              weight_decay=optim_params["weight_decay"])

    # 学習
    train_net(net,
              train_loader,
              test_loader,
              optimizer=optimizer,
              loss_fn=loss_fn,
              epochs=params["epochs"],
              device=device)
    # 推論
    y, ypred = eval_net(net, test_loader, probability=True, device=device)

    # 正答率とネットワークの重みをリストに追加
    ys.append(y.cpu().numpy())
    ypreds.append(ypred.cpu().numpy())
    recall = recall_score(
        ys[-1], ypreds[-1].argmax(1), average=None, zero_division=0) * 100
    print("テストの各クラスrecall:\n{}\n平均:{}".format(
        np.round(recall, decimals=1), np.round(recall.mean(), decimals=1)))
    net_weights.append(net.cpu().state_dict())

utils.print_result(params, ys, ypreds)
utils.save_params(params, net_weights)
示例#33
0
            if required_args is None:
                return {'error': 'Failed to prompt for arguments'}

            optional_args = prompt_args( method_info['opts'], lambda arghelp, argname: raw_input("optional: %s ('%s'): " % (arghelp, argname) ))
            if optional_args is None:
                return {'error': 'Failed to prompt for arguments'}

            full_args = [method_info['command']] + required_args + optional_args
            try:
                args, unknown_args = parser.parse_known_args( args=full_args )
            except SystemExit:
                # invalid arguments
                return {'error': 'Invalid arguments.  Please try again.'}

        result = method( args, config_path=config_path )
        return result

    # not found 
    return {'error': "No such command '%s'" % args.action}


if __name__ == '__main__':
    outcome = run_cli()
    if 'error' in outcome:
        exit_with_error(outcome['error'])
    else:
        # Success: pretty-print the whole response dict and exit cleanly.
        print_result(outcome)
        sys.exit(0)


示例#34
0
#!/usr/bin/env python
#-*- encoding: utf-8 -*-

import nova_utils
import utils

# Probe the scheduler RPC path: pick a random compute host and ask the
# scheduler to report its resources; any failure marks the check 'failed'.
result = 'success'
try:
    nova_utils.init_nova()
    host = nova_utils.get_random_host()
    utils.log(host)
    info = nova_utils.rpccall_scheduler('show_host_resources', host=host)
    assert info
    utils.log(info)
except Exception:
    utils.print_traceback()
    result = 'failed'

utils.print_result(result)
            optional_args = prompt_args(method_info['opts'], prompt_func)
            if optional_args is None:
                return {'error': 'Failed to prompt for arguments'}

            full_args = [method_info['command']] + required_args + optional_args
            try:
                args, unknown_args = parser.parse_known_args(args=full_args)
            except SystemExit:
                # invalid arguments
                return {'error': 'Invalid arguments.  Please try again.'}

        result = method(args, config_path=config_path)
        return {'status': True, 'result': result, 'pragmas': pragmas}

    # not found
    return {'error': 'No such command "{}"'.format(args.action)}


if __name__ == '__main__':
    response = run_cli()
    if 'error' in response:
        exit_with_error(response['error'])
    else:
        if 'raw' in response['pragmas']:
            # 'raw' pragma: dump the result verbatim.
            print(response['result'])
        else:
            print_result(response['result'])
        sys.exit(0)