Example #1
    # cuDNN performance flag; in current PyTorch the usual autotuner
    # switch is torch.backends.cudnn.benchmark
    torch.backends.cudnn.fastest = cudnn
    torch.cuda.set_device(device)

    # running counters: correct predictions, true positives, total samples
    correct = 0.0
    tp = 0.0
    size = 0.0
    # map_location: remap CUDA device ids (especially for transferring
    # checkpoints between workstations with different GPU layouts)
    model = torch.load(filename_model,
                       map_location=dict(
                           ('cuda:' + str(k), 'cuda:' + str(device))
                           for k in range(0, 100)))
    #model=torch.load(filename_model)
    model.eval()  # equivalent to model.train(False); disables dropout / BN updates
    testset = GenomeDataset(filename=filename_testset,
                            seq_name=['seq_ref_1', 'seq_ref_2'],
                            encode_mode=encode_mode)
    testloader = DataLoader(testset,
                            batch_size=batch_size,
                            shuffle=shuffle,
                            num_workers=num_workers)
    print('[INFO] testloader generated')

    probs_list = []
    labels_list = []
    rsids_list = []
    query_snp_rsids_list = []

    for data in tqdm(testloader):
        inputs1, inputs2, labels, rsids, query_snp_rsids = data
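
The excerpt stops right after the batch is unpacked. Below is a minimal sketch (not from the source) of how such an evaluation loop is commonly completed; the model's two-input forward signature, the softmax over two classes, and how results are accumulated into the lists above are assumptions inferred from the variable names.

# Hedged sketch, assuming model(inputs1, inputs2) returns class logits
# and that the counters and lists defined above are in scope.
import torch
import torch.nn.functional as F
from tqdm import tqdm

with torch.no_grad():
    for data in tqdm(testloader):
        inputs1, inputs2, labels, rsids, query_snp_rsids = data
        inputs1 = inputs1.cuda(device)
        inputs2 = inputs2.cuda(device)

        outputs = model(inputs1, inputs2)      # assumed forward signature
        probs = F.softmax(outputs, dim=1)      # per-class probabilities
        preds = probs.argmax(dim=1).cpu()

        correct += (preds == labels).sum().item()
        tp += ((preds == 1) & (labels == 1)).sum().item()
        size += labels.size(0)

        probs_list.extend(probs[:, 1].cpu().tolist())
        labels_list.extend(labels.tolist())
        rsids_list.extend(list(rsids))
        query_snp_rsids_list.extend(list(query_snp_rsids))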
Example #2
    elif scheduler == 'ReduceLROnPlateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, **scheduler_param)
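        # Note: ReduceLROnPlateau is stepped with a monitored metric,
        # e.g. scheduler.step(val_loss) after each validation pass,
        # unlike epoch-based schedulers that call scheduler.step() alone.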

    if loss == 'CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss(weight=class_imbalance)
    elif loss == 'NLLLoss':
        criterion = nn.NLLLoss(weight=class_imbalance)
    elif loss == 'MSELoss':
        # nn.MSELoss does not accept a class-weight argument
        criterion = nn.MSELoss()
    elif loss == 'MSELoss_weighted':
        # custom helper, not defined in this excerpt (see the sketch after it)
        criterion = weighted_mse_loss

    # read datasets
    trainset = GenomeDataset(filename=filename_trainset,
                             seq_name=['seq_ref_1', 'seq_ref_2'],
                             encode_mode=encode_mode)
    trainloader = DataLoader(trainset,
                             batch_size=batch_size,
                             shuffle=shuffle,
                             num_workers=num_workers)
    valset = GenomeDataset(filename=filename_valset,
                           seq_name=['seq_ref_1', 'seq_ref_2'],
                           encode_mode=encode_mode)
    valloader = DataLoader(valset,
                           batch_size=batch_size,
                           shuffle=shuffle,
                           num_workers=num_workers)

    dataloaders = {'train': trainloader, 'val': valloader}
    print("[INFO] dataloaders generated")