Code Example #1
File: evaluate.py Project: oucxlw/byol-a
def do_eval(weight, task='spcv2', unit_sec=1.0, repeat=1, epochs=200, early_stopping=True, seed=42):
    """Main program of linear evaluation."""

    # run deterministically
    seed_everything(seed)

    # load the config, the CV folds of labels with their pre-computed embeddings, and the Leave-One-Out CV flag
    cfg, folds, loocv = prepare_linear_evaluation(weight, task, unit_sec)

    # run evaluation cycle
    results = {}
    for run_idx in range(repeat):
        if loocv:
            score = linear_eval_multi(folds, hidden_sizes=(), epochs=epochs,
                early_stopping=early_stopping, debug=(run_idx == 0))
        else:
            score = linear_eval_single(folds, hidden_sizes=(), epochs=epochs,
                early_stopping=early_stopping, debug=(run_idx == 0))
        results[f'run{run_idx}'] = score

    # calculate stats of scores
    scores = np.array(list(results.values()))
    m, s = scores.mean(), scores.std()
    model_name = Path(weight).stem
    results.update({'1_model': model_name, '2_mean': m, '3_std': s})
    logging.info(f' mean={m}, std={s}\n\n')

    # record score
    append_to_csv(f'results/{task}-scores.csv', results)
    print(m)
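
Example #1 relies on an append_to_csv helper that is not shown and that takes a dict of result columns. A minimal sketch of what such a dict-accepting variant could look like, assuming pandas is available and that dict keys become column headers (the real byol-a helper may differ):

import os
import pandas as pd

def append_to_csv(csv_path, row_dict):
    """Append one result row (a dict of column -> value) to csv_path."""
    # create the target directory on first use
    os.makedirs(os.path.dirname(csv_path) or '.', exist_ok=True)
    # write the header row only when the file does not exist yet
    pd.DataFrame([row_dict]).to_csv(
        csv_path, mode='a', index=False,
        header=not os.path.exists(csv_path))

Under this shape, the results dict built above maps onto one CSV row per evaluation, with keys such as '1_model' and '2_mean' as columns.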
Code Example #2
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    test_params = [
        args.model,
        path_to_save_string(args.dataset),
        path_to_save_string(args.test_dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    test_name = '_'.join([str(x) for x in test_params]) + '.pth'  # built but not used in this excerpt
    model_params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in model_params]) + '.pth'
    header = 'model,training-dataset,test-dataset,viewpoint_modulo,' \
             'batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_test.csv')

    make_dirs_if_not_exist([snapshot_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)

    num_class, train_loader, test_loader = load_datasets(
        args.test_dataset, args.batch_size, args.test_batch_size,
        args.test_viewpoint_modulo)
    model.load_state_dict(torch.load(snapshot_path))
    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}%')
    print(f'Memory usage: {gpu_memory_usage()}')

    to_write = test_params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)

    if args.roc != '':
        make_dirs_if_not_exist(args.roc)
        torch.save((predictions, labels, logits), args.roc)
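
Examples #2 and #8 call append_to_csv(path, row, header=...) with a list of values plus a comma-separated header string. A csv-module sketch of that contract, written as an assumption rather than the project's actual code:

import csv
import os

def append_to_csv(path, row, header=None):
    """Append one row (a list of values) to a CSV file."""
    is_new = not os.path.exists(path)
    with open(path, 'a', newline='') as f:
        writer = csv.writer(f)
        # write the header once, when the file is first created
        if is_new and header is not None:
            writer.writerow(header.split(','))
        writer.writerow(row)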
Code Example #3
File: main.py Project: schlopmyflop/gpnt_assessment
def main():
    date_dict = get_date_dict(date_ranges)
    for year, _date_ranges in date_dict.items():
        for _start_date, _end_date in _date_ranges:
            store_all_data(start_date=_start_date, end_date=_end_date)

        print(f'appending to csv for {year}')
        append_to_csv(db)

    print(f'total time taken {datetime.now() - start}')
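
Here the helper receives only a db handle, which suggests it flushes the rows accumulated by store_all_data rather than appending a single record. A speculative sketch under that assumption; fetch_rows is a hypothetical accessor, not something shown in the project:

import csv

def append_to_csv(db, out_path='yearly_data.csv'):
    # fetch_rows() is hypothetical: it stands in for however db exposes its buffered rows
    with open(out_path, 'a', newline='') as f:
        csv.writer(f).writerows(db.fetch_rows())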
Code Example #4
        for file in os.listdir('./pcaps/{}'.format(domain)):
            if file.endswith(".pcap") and (pat.match(file) is None):
                # if i > 20:
                #     break

                # This is the pcap file we'll be reading at this point.
                file = os.path.join("./pcaps/{}".format(domain), file)

                # Read the pcap file.
                data = utils.read_pcap_file(file)

                # Append the data to the streams array.
                streams.append(data)

                # Append everything to the log.
                utils.append_to_csv(domain, data)

                # Add a label for the new file.
                labels.append(current_label)
                labels_str.append(domain)

                i += 1

        print(f"    {i} pcap files")

        # Increment the label
        current_label += 1

    # Finally train the classifier.
    utils.train(streams, labels)
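
In this fragment, utils.append_to_csv(domain, data) takes the domain name first, hinting at one log file per domain. A guessed shape for that variant; the log_dir location, the per-domain file naming, and the assumption that data is a flat sequence are all hypothetical:

import csv
import os

def append_to_csv(domain, data, log_dir='./logs'):
    """Append one parsed-pcap record to a per-domain CSV log (assumed layout)."""
    os.makedirs(log_dir, exist_ok=True)
    with open(os.path.join(log_dir, domain + '.csv'), 'a', newline='') as f:
        csv.writer(f).writerow(data)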
Code Example #5
size = 15
delta = 1.0
min_ibound, max_ibound, nb_ibound = 4, 10, 3
ibounds = np.linspace(min_ibound, max_ibound, nb_ibound)
nb_experiments = 91
file_name = (
    'ibound[model={}_delta={:.1f}_ibound[min={:d}_max={:d}_num={:d}]].csv'.
    format(args.model_type, delta, min_ibound, max_ibound, nb_ibound))

for i in range(nb_experiments):
    for ibound in ibounds:
        ibound = int(ibound)
        model = model_protocol['generator'](size, delta)
        true_logZ = model_protocol['true_inference'](model)
        for ip in inference_protocols:
            if ip['use_ibound']:
                alg = ip['algorithm'](model, ibound)
            else:
                alg = ip['algorithm'](model)

            tic = time.time()
            logZ = alg.run(**ip['run_args'])
            err = np.abs(true_logZ - logZ)
            toc = time.time()

            print('Alg: {:15}, Error: {:15.4f}, Time: {:15.2f}'.format(
                ip['name'], err, toc - tic))

            utils.append_to_csv(file_name,
                                [ibound, ip['name'], err, toc - tic])
Code Example #6
    print(file_name)
    err_file_name = 'err_' + file_name
    time_file_name = 'time_' + file_name
    contents = utils.read_csv(file_name, dir_name=dir_name)
    err_dict, time_dict = dict(), dict()
    for c in contents:
        key1, key2, err, time = c
        if key1 not in err_dict:
            err_dict[key1] = OrderedDict()
            time_dict[key1] = OrderedDict()

        if key2 not in err_dict[key1]:
            err_dict[key1][key2] = []
            time_dict[key1][key2] = []

        err_dict[key1][key2].append(float(err))
        time_dict[key1][key2].append(float(time))

    for key1 in sorted(err_dict.keys()):
        print('Key 1: {}, algorithms: {}'.format(key1, list(err_dict[key1].keys())))
        avg_values, avg_times = [], []
        for key2 in sorted(err_dict[key1].keys(), key=alg_names.index):
            avg_values.append(np.mean(err_dict[key1][key2]) / np.log(10))
            avg_times.append(np.mean(time_dict[key1][key2]))
            print('Key 2: {:15}, Error: {:15.4f}, Time: {:15.2f}, Num: {:15}'.
                  format(key2, np.mean(err_dict[key1][key2]),
                         np.mean(time_dict[key1][key2]),
                         len(err_dict[key1][key2])))
        utils.append_to_csv(err_file_name, avg_values, dir_name=agg_dir_name)
        utils.append_to_csv(time_file_name, avg_times, dir_name=agg_dir_name)
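
Examples #5-#7 and #9 write rows through utils.append_to_csv with an optional dir_name, and this example reads them back with utils.read_csv. One way the two utilities could be paired, shown only as an assumed contract:

import csv
import os

def append_to_csv(file_name, row, dir_name='.'):
    """Append one row to dir_name/file_name, creating the directory if needed."""
    os.makedirs(dir_name, exist_ok=True)
    with open(os.path.join(dir_name, file_name), 'a', newline='') as f:
        csv.writer(f).writerow(row)

def read_csv(file_name, dir_name='.'):
    """Return every row of dir_name/file_name as a list of string lists."""
    with open(os.path.join(dir_name, file_name), newline='') as f:
        return list(csv.reader(f))

Under this contract, the key1, key2, err, time unpacking above matches the four-element rows written in Examples #5 and #7.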
Code Example #7
model_protocol = protocols.model_protocol_dict[args.model_type]
inference_protocols = [protocols.inference_protocol_dict[name] for name in args.algorithms]

size = 15
min_delta, max_delta, nb_delta = 0.0, 2.0, 9
deltas = np.linspace(min_delta, max_delta, nb_delta)
ibound = 10
nb_experiments = 10
file_name = "delta[model={}_ibound={:d}_delta[min={:.1f}_max={:.1f}_num={:d}]].csv".format(
    args.model_type, ibound, min_delta, max_delta, nb_delta
)

for i in range(nb_experiments):
    for delta in deltas:
        model = model_protocol["generator"](size, delta)
        true_logZ = model_protocol["true_inference"](model)
        for ip in inference_protocols:
            if ip["use_ibound"]:
                alg = ip["algorithm"](model, ibound)
            else:
                alg = ip["algorithm"](model)

            tic = time.time()
            logZ = alg.run(**ip["run_args"])
            err = np.abs(true_logZ - logZ)
            toc = time.time()

            print("Alg: {:15}, Error: {:15.4f}, Time: {:15.2f}".format(ip["name"], err, toc - tic))

            utils.append_to_csv(file_name, [delta, ip["name"], err, toc - tic])
Code Example #8
def main():
    global args
    args = parser.parse_args()

    print()
    print('Command-line argument values:')
    for key, value in vars(args).items():
        print('-', key, ':', value)
    print()

    params = [
        args.model,
        path_to_save_string(args.dataset), args.viewpoint_modulo,
        args.batch_size, args.epochs, args.lr, args.weight_decay, args.seed,
        args.routing_iters
    ]
    model_name = '_'.join([str(x) for x in params]) + '.pth'
    header = 'model,dataset,viewpoint_modulo,batch_size,epochs,lr,weight_decay,seed,em_iters,accuracy'
    snapshot_path = os.path.join('.', 'snapshots', model_name)
    data_path = os.path.join('.', 'results', 'training_data', model_name)
    result_path = os.path.join('.', 'results', 'pytorch_train.csv')

    make_dirs_if_not_exist([snapshot_path, data_path, result_path])

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    model, criterion, optimizer, scheduler = load_model(
        args.model,
        device_ids=args.device_ids,
        lr=args.lr,
        routing_iters=args.routing_iters)
    num_class, train_loader, test_loader = load_datasets(
        args.dataset, args.batch_size, args.test_batch_size,
        args.viewpoint_modulo)

    best_acc = 0
    training_accuracies = []
    test_accuracies = []

    if args.append:
        model.load_state_dict(torch.load(snapshot_path))
    try:
        for epoch in range(1, args.epochs + 1):
            print()
            acc = train(train_loader,
                        model,
                        criterion,
                        optimizer,
                        epoch,
                        epochs=args.epochs,
                        log_interval=args.log_interval)
            training_accuracies.append(acc)
            scheduler.step(acc)
            print('Epoch accuracy was %.1f%%. Learning rate is %.9f.' %
                  (acc, optimizer.state_dict()['param_groups'][0]['lr']))
            if epoch % args.test_interval == 0:
                test_acc, __, __, __ = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=args.test_size)
                test_accuracies.append(test_acc)
                if test_acc > best_acc:
                    best_acc = test_acc
    except KeyboardInterrupt:
        print('Cancelled training after %d epochs' % (epoch - 1))
        args.epochs = epoch - 1

    acc, predictions, labels, logits = test(test_loader,
                                            model,
                                            criterion,
                                            chunk=1)
    print(f'Accuracy: {acc:.2f}% (best: {best_acc:.2f}%)')

    to_write = params + [acc.cpu().numpy()]
    append_to_csv(result_path, to_write, header=header)
    snapshot(snapshot_path, model)
    #torch.save((accuracies, labels, predictions), data_path)

    if args.learn_curve != '':
        make_dirs_if_not_exist(args.learn_curve)
        torch.save((training_accuracies, test_accuracies), args.learn_curve)
Code Example #9
]  # closes a list truncated from this excerpt (presumably inference_protocols, iterated below)

size = 15
min_delta, max_delta, nb_delta = 0.0, 2.0, 9
deltas = np.linspace(min_delta, max_delta, nb_delta)
ibound = 10
nb_experiments = 1
file_name = (
    'delta[model={}_ibound={:d}_delta[min={:.1f}_max={:.1f}_num={:d}]].csv'.
    format(model_type, ibound, min_delta, max_delta, nb_delta))

for i in range(nb_experiments):
    model = model_protocol['generator'](args.model_name)
    true_logZ = model_protocol['true_inference'](model)
    for ip in inference_protocols:
        if ip['use_ibound']:
            alg = ip['algorithm'](model, ibound)
        else:
            alg = ip['algorithm'](model)

        tic = time.time()
        logZ = alg.run(**ip['run_args'])
        err = np.abs(true_logZ - logZ)
        toc = time.time()

        print('Alg: {:15}, Error: {:15.4f}, Time: {:15.2f}'.format(
            ip['name'], err, toc - tic))

        utils.append_to_csv(file_name,
                            [args.model_name, ip['name'], err, toc - tic])