Example #1
def runExperiment():
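    # Seed the RNGs from the model tag, then compute the Davies-Bouldin index either on raw
    # data-loader batches or on samples previously saved to ./output/npy/created_*.npy.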
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    if cfg['raw']:
        data_loader = make_data_loader(dataset)['train']
        metric = Metric()
        img, label = [], []
        for i, input in enumerate(data_loader):
            input = collate(input)
            img.append(input['img'])
            label.append(input['label'])
        img = torch.cat(img, dim=0)
        label = torch.cat(label, dim=0)
        output = {'img': img, 'label': label}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
        dbi_result = evaluation['DBI']
        print('Davies-Bouldin Index ({}): {}'.format(cfg['data_name'],
                                                     dbi_result))
        save(dbi_result,
             './output/result/dbi_created_{}.npy'.format(cfg['data_name']),
             mode='numpy')
    else:
        created = np.load('./output/npy/created_{}.npy'.format(
            cfg['model_tag']),
                          allow_pickle=True)
        test(created)
    return
Example #2
def runExperiment():
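    # Evaluate a resumed global model (best checkpoint) on the test split and save both the
    # train and test loggers together with the config.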
    cfg['batch_size']['train'] = cfg['batch_size']['test']
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset)
    model = eval('models.{}(model_rate=cfg["global_model_rate"]).to(cfg["device"])'.format(
        cfg['model_name']))
    last_epoch, data_split, label_split, model, _, _, _ = resume(
        model, cfg['model_tag'], load_tag='best', strict=False)
    current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
    logger_path = 'output/runs/test_{}_{}'.format(cfg['model_tag'],
                                                  current_time)
    test_logger = Logger(logger_path)
    test_logger.safe(True)
    test(dataset['test'], model, test_logger, last_epoch)
    test_logger.safe(False)
    _, _, _, _, _, _, train_logger = resume(model,
                                            cfg['model_tag'],
                                            load_tag='checkpoint',
                                            strict=False)
    save_result = {
        'cfg': cfg,
        'epoch': last_epoch,
        'logger': {
            'train': train_logger,
            'test': test_logger
        }
    }
    save(save_result, './output/result/{}.pt'.format(cfg['model_tag']))
    return
Example #3
def runExperiment():
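    # Compute Inception Score and FID either on raw data-loader batches or on generated
    # samples loaded from ./output/npy/generated_*.npy.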
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    if cfg['raw']:
        data_loader = make_data_loader(dataset)['train']
        metric = Metric()
        img = []
        for i, input in enumerate(data_loader):
            input = collate(input)
            img.append(input['img'])
        img = torch.cat(img, dim=0)
        output = {'img': img}
        evaluation = metric.evaluate(cfg['metric_name']['test'], None, output)
        is_result, fid_result = evaluation['InceptionScore'], evaluation['FID']
        print('Inception Score ({}): {}'.format(cfg['data_name'], is_result))
        print('FID ({}): {}'.format(cfg['data_name'], fid_result))
        save(is_result,
             './output/result/is_generated_{}.npy'.format(cfg['data_name']),
             mode='numpy')
        save(fid_result,
             './output/result/fid_generated_{}.npy'.format(cfg['data_name']),
             mode='numpy')
    else:
        generated = np.load('./output/npy/generated_{}.npy'.format(
            cfg['model_tag']),
                            allow_pickle=True)
        test(generated)
    return
Example #4
def runExperiment():
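    # Single-model training loop: optionally resume, train and evaluate each epoch, step the
    # scheduler, write a checkpoint, and copy it to *_best.pt whenever the pivot metric improves.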
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    data_loader = make_data_loader(dataset)
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    optimizer = make_optimizer(model)
    scheduler = make_scheduler(optimizer)
    if cfg['resume_mode'] == 1:
        last_epoch, model, optimizer, scheduler, logger = resume(
            model, cfg['model_tag'], optimizer, scheduler)
    elif cfg['resume_mode'] == 2:
        last_epoch = 1
        _, model, _, _, _ = resume(model, cfg['model_tag'])
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/{}_{}'.format(cfg['model_tag'],
                                                 current_time)
        logger = Logger(logger_path)
    else:
        last_epoch = 1
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/train_{}_{}'.format(cfg['model_tag'],
                                                       current_time)
        logger = Logger(logger_path)
    if cfg['world_size'] > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          cfg['world_size'])))
    for epoch in range(last_epoch, cfg['num_epochs'] + 1):
        logger.safe(True)
        train(data_loader['train'], model, optimizer, logger, epoch)
        test(data_loader['train'], model, logger, epoch)
        if cfg['scheduler_name'] == 'ReduceLROnPlateau':
            scheduler.step(
                metrics=logger.mean['test/{}'.format(cfg['pivot_metric'])])
        else:
            scheduler.step()
        logger.safe(False)
        model_state_dict = (model.module.state_dict()
                            if cfg['world_size'] > 1 else model.state_dict())
        save_result = {
            'cfg': cfg,
            'epoch': epoch + 1,
            'model_dict': model_state_dict,
            'optimizer_dict': optimizer.state_dict(),
            'scheduler_dict': scheduler.state_dict(),
            'logger': logger
        }
        save(save_result,
             './output/model/{}_checkpoint.pt'.format(cfg['model_tag']))
        if cfg['pivot'] > logger.mean['test/{}'.format(cfg['pivot_metric'])]:
            cfg['pivot'] = logger.mean['test/{}'.format(cfg['pivot_metric'])]
            shutil.copy(
                './output/model/{}_checkpoint.pt'.format(cfg['model_tag']),
                './output/model/{}_best.pt'.format(cfg['model_tag']))
        logger.reset()
    logger.safe(False)
    return
Example #5
def runExperiment():
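    # Load the best checkpoint and hand the model to transit().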
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    _, model, _, _, _ = resume(model, cfg['model_tag'], load_tag='best')
    transit(model)
    return
Example #6
def runExperiment():
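    # Summarize the global-rate model over the training loader and save the parsed totals
    # under a data/model/mode tag.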
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset)
    data_loader = make_data_loader(dataset)
    model = eval('models.{}(model_rate=cfg["global_model_rate"]).to(cfg["device"])'.format(cfg['model_name']))
    summary = summarize(data_loader['train'], model)
    content, total = parse_summary(summary)
    print(content)
    save_result = total
    save_tag = '{}_{}_{}'.format(cfg['data_name'], cfg['model_name'], cfg['model_mode'][0])
    save(save_result, './output/result/{}.pt'.format(save_tag))
    return
Example #7
def runExperiment():
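    # Print a model summary over the training loader; PixelCNN variants also instantiate
    # their autoencoder.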
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    data_loader = make_data_loader(dataset)
    if 'pixelcnn' in cfg['model_name']:
        ae = eval('models.{}().to(cfg["device"])'.format(cfg['ae_name']))
    else:
        ae = None
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    summary = summarize(data_loader['train'], model, ae)
    content = parse_summary(summary)
    print(content)
    return
Example #8
def runExperiment():
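    # Federated training loop: split the dataset across users, wrap the global parameters in a
    # Federation, train and test each global round, and keep the checkpoint with the best pivot metric.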
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset)
    model = eval('models.{}(model_rate=cfg["global_model_rate"]).to(cfg["device"])'.format(cfg['model_name']))
    optimizer = make_optimizer(model, cfg['lr'])
    scheduler = make_scheduler(optimizer)
    if cfg['resume_mode'] == 1:
        last_epoch, data_split, label_split, model, optimizer, scheduler, logger = resume(model, cfg['model_tag'],
                                                                                          optimizer, scheduler)
    elif cfg['resume_mode'] == 2:
        last_epoch = 1
        _, data_split, label_split, model, _, _, _ = resume(model, cfg['model_tag'])
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/{}_{}'.format(cfg['model_tag'], current_time)
        logger = Logger(logger_path)
    else:
        last_epoch = 1
        data_split, label_split = split_dataset(dataset, cfg['num_users'], cfg['data_split_mode'])
        current_time = datetime.datetime.now().strftime('%b%d_%H-%M-%S')
        logger_path = 'output/runs/train_{}_{}'.format(cfg['model_tag'], current_time)
        logger = Logger(logger_path)
    if data_split is None:
        data_split, label_split = split_dataset(dataset, cfg['num_users'], cfg['data_split_mode'])
    global_parameters = model.state_dict()
    federation = Federation(global_parameters, cfg['model_rate'], label_split)
    for epoch in range(last_epoch, cfg['num_epochs']['global'] + 1):
        logger.safe(True)
        train(dataset['train'], data_split['train'], label_split, federation, model, optimizer, logger, epoch)
        test_model = stats(dataset['train'], model)
        test(dataset['test'], data_split['test'], label_split, test_model, logger, epoch)
        if cfg['scheduler_name'] == 'ReduceLROnPlateau':
            scheduler.step(metrics=logger.mean['train/{}'.format(cfg['pivot_metric'])])
        else:
            scheduler.step()
        logger.safe(False)
        model_state_dict = model.state_dict()
        save_result = {
            'cfg': cfg, 'epoch': epoch + 1, 'data_split': data_split, 'label_split': label_split,
            'model_dict': model_state_dict, 'optimizer_dict': optimizer.state_dict(),
            'scheduler_dict': scheduler.state_dict(), 'logger': logger}
        save(save_result, './output/model/{}_checkpoint.pt'.format(cfg['model_tag']))
        if cfg['pivot'] < logger.mean['test/{}'.format(cfg['pivot_metric'])]:
            cfg['pivot'] = logger.mean['test/{}'.format(cfg['pivot_metric'])]
            shutil.copy('./output/model/{}_checkpoint.pt'.format(cfg['model_tag']),
                        './output/model/{}_best.pt'.format(cfg['model_tag']))
        logger.reset()
    logger.safe(False)
    return
Example #9
def runExperiment():
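    # Resume the best model (plus its autoencoder for PixelCNN variants) and generate samples.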
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    if 'pixelcnn' in cfg['model_name']:
        ae = eval('models.{}().to(cfg["device"])'.format(cfg['ae_name']))
        _, ae, _, _, _ = resume(ae, cfg['ae_tag'], load_tag='best')
    else:
        ae = None
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    _, model, _, _, _ = resume(model, cfg['model_tag'], load_tag='best')
    generate(model, ae)
    return
Example #10
def get_dataset(n_news):
    qry_news = 'select * from outer_data.interfax_news limit {}'.format(n_news)
    with pyodbc.connect('DSN=Impala;Database=prod_dct_sbx',
                        autocommit=True) as conn:
        df_news = pd.read_sql(qry_news, conn)
    print('SQL query completed')
    return utils.process_dataset(df_news)
Example #11
def input_fn(dataset_filename, vocab_filename, num_channels=39, batch_size=8, num_epochs=1):
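    # Build the input pipeline: read the dataset, create the vocab lookup table, and pass the
    # SOS/EOS tokens, batch size, and epoch count to utils.process_dataset.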
    dataset = utils.read_dataset(dataset_filename, num_channels)
    vocab_table = utils.create_vocab_table(vocab_filename)

    dataset = utils.process_dataset(
        dataset, vocab_table, utils.SOS, utils.EOS, batch_size, num_epochs)

    return dataset
Example #12
def runExperiment():
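    # Resume the best checkpoint, evaluate it on the training loader, and save the config,
    # epoch, and logger.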
    seed = int(cfg['model_tag'].split('_')[0])
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    data_loader = make_data_loader(dataset)
    model = eval('models.{}().to(cfg["device"])'.format(cfg['model_name']))
    load_tag = 'best'
    last_epoch, model, _, _, _ = resume(model, cfg['model_tag'], load_tag=load_tag)
    logger_path = 'output/runs/test_{}_{}'.format(cfg['model_tag'], datetime.datetime.now().strftime('%b%d_%H-%M-%S'))
    logger = Logger(logger_path)
    logger.safe(True)
    test(data_loader['train'], model, logger, last_epoch)
    logger.safe(False)
    save_result = {'cfg': cfg, 'epoch': last_epoch, 'logger': logger}
    save(save_result, './output/result/{}.pt'.format(cfg['model_tag']))
    return
Example #13
File: train.py  Project: madved/phones-las
def input_fn(dataset_filename,
             vocab_filename,
             norm_filename=None,
             num_channels=39,
             batch_size=8,
             num_epochs=1,
             binf2phone=None,
             num_parallel_calls=32,
             max_frames=-1,
             max_symbols=-1):
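    # Training input pipeline: targets are either phone strings or binary phonological-feature
    # vectors (binf2phone); features can be mean/std normalized; utils.process_dataset handles
    # batching with frame/symbol limits.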
    binary_targets = binf2phone is not None
    labels_shape = [] if not binary_targets else len(binf2phone.index)
    labels_dtype = tf.string if not binary_targets else tf.float32
    dataset = utils.read_dataset(dataset_filename,
                                 num_channels,
                                 labels_shape=labels_shape,
                                 labels_dtype=labels_dtype)
    vocab_table = utils.create_vocab_table(vocab_filename)

    if norm_filename is not None:
        means, stds = utils.load_normalization(norm_filename)
    else:
        means = stds = None

    sos = binf2phone[utils.SOS].values if binary_targets else utils.SOS
    eos = binf2phone[utils.EOS].values if binary_targets else utils.EOS

    dataset = utils.process_dataset(dataset,
                                    vocab_table,
                                    sos,
                                    eos,
                                    means,
                                    stds,
                                    batch_size,
                                    num_epochs,
                                    binary_targets=binary_targets,
                                    labels_shape=labels_shape,
                                    num_parallel_calls=num_parallel_calls,
                                    max_frames=max_frames,
                                    max_symbols=max_symbols)

    return dataset
Example #14
File: infer.py  Project: madved/phones-las
def input_fn(dataset_filename,
             vocab_filename,
             norm_filename=None,
             num_channels=39,
             batch_size=8,
             take=0,
             binf2phone=None):
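    # Inference-time variant of the training pipeline: single epoch, is_infer=True, and
    # optional truncation with take().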
    binary_targets = binf2phone is not None
    labels_shape = [] if not binary_targets else len(binf2phone.index)
    labels_dtype = tf.string if not binary_targets else tf.float32
    dataset = utils.read_dataset(dataset_filename,
                                 num_channels,
                                 labels_shape=labels_shape,
                                 labels_dtype=labels_dtype)
    vocab_table = utils.create_vocab_table(vocab_filename)

    if norm_filename is not None:
        means, stds = utils.load_normalization(norm_filename)
    else:
        means = stds = None

    sos = binf2phone[utils.SOS].values if binary_targets else utils.SOS
    eos = binf2phone[utils.EOS].values if binary_targets else utils.EOS

    dataset = utils.process_dataset(dataset,
                                    vocab_table,
                                    sos,
                                    eos,
                                    means,
                                    stds,
                                    batch_size,
                                    1,
                                    binary_targets=binary_targets,
                                    labels_shape=labels_shape,
                                    is_infer=True)

    if take > 0:
        dataset = dataset.take(take)
    return dataset
Example #15
def input_fn(features,
             vocab_filename,
             norm_filename=None,
             num_channels=39,
             batch_size=8,
             ground_truth=None):
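    # Build a dataset from in-memory features (optionally paired with ground-truth labels) via
    # a generator, then run the shared preprocessing in inference mode.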
    def gen():
        if ground_truth is not None:
            iterable = zip(features, ground_truth)
        else:
            iterable = features
        for item in iterable:
            yield item

    output_types = (tf.float32,
                    tf.string) if ground_truth is not None else tf.float32
    output_shapes = tf.TensorShape([None, features[0].shape[-1]])
    if ground_truth is not None:
        output_shapes = (output_shapes,
                         tf.TensorShape([None, ground_truth[0].shape[-1]]))
    dataset = tf.data.Dataset.from_generator(gen, output_types, output_shapes)
    vocab_table = utils.create_vocab_table(vocab_filename)

    if norm_filename is not None:
        means, stds = utils.load_normalization(norm_filename)
    else:
        means = stds = None

    dataset = utils.process_dataset(dataset,
                                    vocab_table,
                                    utils.SOS,
                                    utils.EOS,
                                    means,
                                    stds,
                                    min(features[0].shape[0], batch_size),
                                    1,
                                    is_infer=True)
    return dataset
Example #16
def input_fn(features, vocab_filename, norm_filename=None):
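    # Single-stream variant: features come from a generator and are preprocessed with batch
    # size 1 in inference mode.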
    def gen():
        for item in features:
            yield item

    output_shapes = tf.TensorShape([None, features[0].shape[-1]])
    dataset = tf.data.Dataset.from_generator(gen, tf.float32, output_shapes)
    vocab_table = utils.create_vocab_table(vocab_filename)
    if norm_filename is not None:
        means, stds = utils.load_normalization(norm_filename)
    else:
        means = stds = None

    dataset = utils.process_dataset(dataset,
                                    vocab_table,
                                    utils.SOS,
                                    utils.EOS,
                                    means,
                                    stds,
                                    1,
                                    1,
                                    is_infer=True)
    return dataset
Example #17
def runExperiment():
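    # Run test() directly on the training data loader; no model is involved.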
    dataset = fetch_dataset(cfg['data_name'], cfg['subset'])
    process_dataset(dataset['train'])
    data_loader = make_data_loader(dataset)
    test(data_loader['train'])
    return
Example #18
    # instantiate agents and their corresponding traits
    agents, traits_agents = instantiate_agents()
    pickle.dump(traits_agents, traits_file)
    traits_file.close()
    # calculate total reputation by summing all the 0th elements of the reputation lists
    total_reputation = torch.tensor(0.0)
    for agent_id in range(args.num_agents):
        total_reputation += traits_agents[agent_id][2][0]
    # initialize optimizers
    optimizers = dict([(agent_id,
                        optim.Adam(agents[agent_id].parameters(),
                                   lr=args.learning_rate))
                       for agent_id in range(args.num_agents)])
    # divide dataset into train and test sets
    train_set = utils.process_dataset()
    # classes in CIFAR-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    # Initialize running mean of rewards
    rewards_mean = dict([(agent, torch.zeros(args.num_steps))
                         for agent in agents])
    running_rewards = 0.0
    num_consensus = 0  # number of times the community reaches consensus
    avg_steps, step = 0, 0  # average number of steps needed to reach consensus

    for epoch in range(4):
        # start running episodes
        for episode_id in range(args.num_episodes):
            # pick up one sample from train dataset
Example #19
    writer.add_embedding(msg,
                         metadata=[agent] * num_time_steps,
                         tag='Im_msg_' + str(agent) + '_EP_' + str(episode) +
                         '_lab_' + str(true_label) + '_' + str(policy_number))


if __name__ == '__main__':
    policy_step = 16000  # model step to be loaded
    agents, traits_agents = instantiate_agents(policy_step)
    # calculate total reputation by summing all the 0th elements of the reputation lists
    total_reputation = torch.tensor(0.0)
    for agent_id in range(args.num_agents):
        total_reputation += traits_agents[agent_id][2][0]

    # load test set
    test_set = utils.process_dataset(evaluate=True)
    # classes in CIFAR-10
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
               'ship', 'truck')

    num_consensus = 0  # number of times the community reaches consensus
    running_rewards = 0.0
    avg_steps, step = 0, 0  # average number of steps needed to reach consensus

    for episode_id in tqdm(range(args.num_test_episodes)):
        # pick up one sample from test dataset
        (img, target) = test_set[episode_id]
        img = img.unsqueeze(0)
        target = torch.tensor([target])

        # initialize / re-initialize all parameters
Example #20
def run(args: argparse.ArgumentParser) -> None:
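    # Entity classification on a DGL heterograph: neighbor-sampled training, validation either
    # with its own sampler or with no dataloader (handled inside validate()), early stopping
    # through utils.Callback, and optional evaluation on the test split.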
    torch.manual_seed(args.seed)

    dataset, hg, train_idx, valid_idx, test_idx = utils.process_dataset(
        args.dataset,
        root=args.dataset_root,
    )
    predict_category = dataset.predict_category
    labels = hg.nodes[predict_category].data['labels']

    training_device = torch.device('cuda' if args.gpu_training else 'cpu')
    inference_device = torch.device('cuda' if args.gpu_inference else 'cpu')

    inference_mode = args.inference_mode

    fanouts = [int(fanout) for fanout in args.fanouts.split(',')]

    train_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
    train_dataloader = dgl.dataloading.NodeDataLoader(
        hg,
        {predict_category: train_idx},
        train_sampler,
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers,
    )

    if inference_mode == 'neighbor_sampler':
        valid_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
        valid_dataloader = dgl.dataloading.NodeDataLoader(
            hg,
            {predict_category: valid_idx},
            valid_sampler,
            batch_size=args.eval_batch_size,
            shuffle=False,
            drop_last=False,
            num_workers=args.eval_num_workers,
        )

        if args.test_validation:
            test_sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts)
            test_dataloader = dgl.dataloading.NodeDataLoader(
                hg,
                {predict_category: test_idx},
                test_sampler,
                batch_size=args.eval_batch_size,
                shuffle=False,
                drop_last=False,
                num_workers=args.eval_num_workers,
            )
    else:
        valid_dataloader = None

        if args.test_validation:
            test_dataloader = None

    in_feats = hg.nodes[predict_category].data['feat'].shape[-1]
    out_feats = dataset.num_classes

    num_nodes = {}
    node_feats = {}

    for ntype in hg.ntypes:
        num_nodes[ntype] = hg.num_nodes(ntype)
        node_feats[ntype] = hg.nodes[ntype].data.get('feat')

    activations = {'leaky_relu': F.leaky_relu, 'relu': F.relu}

    embedding_layer = RelGraphEmbedding(hg, in_feats, num_nodes, node_feats)
    model = EntityClassify(
        hg,
        in_feats,
        args.hidden_feats,
        out_feats,
        args.num_bases,
        args.num_layers,
        norm=args.norm,
        layer_norm=args.layer_norm,
        input_dropout=args.input_dropout,
        dropout=args.dropout,
        activation=activations[args.activation],
        self_loop=args.self_loop,
    )

    loss_function = nn.CrossEntropyLoss()

    embedding_optimizer = torch.optim.SparseAdam(
        embedding_layer.node_embeddings.parameters(), lr=args.embedding_lr)

    if args.node_feats_projection:
        all_parameters = chain(model.parameters(),
                               embedding_layer.embeddings.parameters())
        model_optimizer = torch.optim.Adam(all_parameters, lr=args.model_lr)
    else:
        model_optimizer = torch.optim.Adam(model.parameters(),
                                           lr=args.model_lr)

    checkpoint = utils.Callback(args.early_stopping_patience,
                                args.early_stopping_monitor)

    print('## Training started ##')

    for epoch in range(args.num_epochs):
        train_time, train_loss, train_accuracy = train(
            embedding_layer,
            model,
            training_device,
            embedding_optimizer,
            model_optimizer,
            loss_function,
            labels,
            predict_category,
            train_dataloader,
        )
        valid_time, valid_loss, valid_accuracy = validate(
            embedding_layer,
            model,
            inference_device,
            inference_mode,
            loss_function,
            hg,
            labels,
            predict_category=predict_category,
            dataloader=valid_dataloader,
            eval_batch_size=args.eval_batch_size,
            eval_num_workers=args.eval_num_workers,
            mask=valid_idx,
        )

        checkpoint.create(
            epoch,
            train_time,
            valid_time,
            train_loss,
            valid_loss,
            train_accuracy,
            valid_accuracy,
            {
                'embedding_layer': embedding_layer,
                'model': model
            },
        )

        print(f'Epoch: {epoch + 1:03} '
              f'Train Loss: {train_loss:.2f} '
              f'Valid Loss: {valid_loss:.2f} '
              f'Train Accuracy: {train_accuracy:.4f} '
              f'Valid Accuracy: {valid_accuracy:.4f} '
              f'Train Epoch Time: {train_time:.2f} '
              f'Valid Epoch Time: {valid_time:.2f}')

        if checkpoint.should_stop:
            print('## Training finished: early stopping ##')

            break
        elif epoch >= args.num_epochs - 1:
            print('## Training finished ##')

    print(f'Best Epoch: {checkpoint.best_epoch} '
          f'Train Loss: {checkpoint.best_epoch_train_loss:.2f} '
          f'Valid Loss: {checkpoint.best_epoch_valid_loss:.2f} '
          f'Train Accuracy: {checkpoint.best_epoch_train_accuracy:.4f} '
          f'Valid Accuracy: {checkpoint.best_epoch_valid_accuracy:.4f}')

    if args.test_validation:
        print('## Test data validation ##')

        embedding_layer.load_state_dict(
            checkpoint.best_epoch_model_parameters['embedding_layer'])
        model.load_state_dict(checkpoint.best_epoch_model_parameters['model'])

        test_time, test_loss, test_accuracy = validate(
            embedding_layer,
            model,
            inference_device,
            inference_mode,
            loss_function,
            hg,
            labels,
            predict_category=predict_category,
            dataloader=test_dataloader,
            eval_batch_size=args.eval_batch_size,
            eval_num_workers=args.eval_num_workers,
            mask=test_idx,
        )

        print(f'Test Loss: {test_loss:.2f} '
              f'Test Accuracy: {test_accuracy:.4f} '
              f'Test Epoch Time: {test_time:.2f}')
Example #21
    def import_dataset(self, dataset_filename, dataset_type='wiki',
                       year_filter=None, parts=1):
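        # Parse the raw votes file, dump a tab-separated .mtx edge list, and fill an
        # n_nodes x n_nodes utility matrix with the recorded vote values; the parsed
        # voters are stored on self at the end.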
        self.clear(self.config["dataset"])
        f = open(dataset_filename, 'r')
        
        voters, names, inv_names = \
                    utils.process_dataset(f, dataset_type, year_filter)
        n_nodes = len(names)
        print "Constructing utility matrix ..."
        
        # for i in names:
        #     print i, ': ', names[i]
        #
        # for i in candidates:
        #     print i, ': ', candidates[i]
        mtx_graph = [] 
        for v in voters:
            for c in sorted(voters[v]['votes'].keys()):
                vote = voters[v]['votes'][c]['VOT']
                if vote != '0':
                    mtx_graph.append(str(v) + '\t' + str(c) + '\t' + vote)
       
        year_str = '_'.join(year_filter) if year_filter else 'all'
        f = open(dataset_filename + '_' + year_str + '.mtx', 'w')
        f.write('\n'.join(mtx_graph) + '\n')
        f.close()
            # for c in v['votes'].keys():
                
        # Reference point
        # ref_point = datetime.date(min([int(i) for i in no_date.keys()]), 1, 1)
        # ref_point = time.mktime(ref_point.timetuple())
        utility_matrix = np.array(np.zeros((n_nodes, n_nodes)))
        # time_matrix = np.zeros((count, count))
        utility_matrix[:] = np.NAN
        # time_matrix[:] = np.NAN
        
        # a = candidates.values()
        # a.sort()
        # print a, len(a)
        #Count nodes in the dataset
        # nodes = set({})
        for c in range(n_nodes):
            elec = []
            
            for v in voters:
                v_votes = voters[v]['votes']
                if not c in v_votes:
                    continue
                # The date would become an integer giving the difference in minutes
                # from the reference point (disabled here; fixed to 1)
                date = 1  # (time.mktime(v_votes[c]['DAT'])-ref_point)/60
                edge = int(v_votes[c]['VOT'])
                if edge != 0:
                    elec.append((date, edge, v))
                    #Conunting Nodes
                    # nodes.add(v_votes[c]['SRC'])
                
            
            #empty elections
            if len(elec) == 0:
                continue
            elec.sort()
            
            #Conunting Nodes
            # nodes.add(inv_candidates[c])

            # Min-max normalization. Min_delta = elec[0][0]-elec[0][0] =0
            # first_vote_time = elec[0][0]
            # _max_delta = elec[len(elec)-1][0] - first_vote_time + 0.1
            
            # Generate temporal votes
            for vote in elec:
                # vote_time = vote[0]
                # time_delta = 1-((vote_time-first_vote_time)/_max_delta)
                # enhanced_vote = int(math.ceil(time_delta*parts))*vote[1]
                utility_matrix[c][vote[2]] = vote[1]#enhanced_vote
                # time_matrix[c][vote[2]] = vote_time
                
            # print inv_candidates[c]
            # for i in utility_matrix[c]:
            #     if not np.isnan(i):
            #         print int(i)
            #
            # print
            # print 
            
        print('Total nodes:', n_nodes)
        # print nodes
        # to_delete = []
        # print "Deleting nodes without votes..."
        # #Eleminate candidates without  votes
        # ut = utility_matrix.T
        # for i in range(0, len(utility_matrix)):
        #     if reduce(utils.land,map(np.isnan, utility_matrix[i])) and\
        #        reduce(utils.land,map(np.isnan, ut[i])):
        #         to_delete.append(i)
        #
        # to_delete.reverse()
        # for i in to_delete:
        #     utility_matrix = np.delete(utility_matrix, i, 0)
        #     utility_matrix = np.delete(utility_matrix, i, 1)      
            # time_matrix = np.delete(time_matrix, i, 0)
            # time_matrix = np.delete(time_matrix, i, 1)
            
        #
        # ut = utility_matrix.T
        # to_delete = []
        # print "Deleting voters without votes..."
        # #Eleminate voters without  votes
        # for i in range(0, len(ut)):
        #     if reduce(utils.land,map(np.isnan, ut[i])):
        #         to_delete.append(i)
        #
        # to_delete.reverse()
        # for i in to_delete:
        #     utility_matrix = np.delete(utility_matrix, i, 1)
        #     time_matrix = np.delete(time_matrix, i, 1)
            
                
        print(utility_matrix.T)
        # print time_matrix.T
        
        # self.save_matrix(utility_matrix, 'dataset/utility.npy')
        # self.save_matrix(time_matrix, 'dataset/time.npy')
        
            
        for v in voters.values():
            v['targets'] = set(v['votes'].keys())
    
        self.voters = voters