Example #1
import copy

import numpy as np
import torch

# utils, models and DANNet are modules/classes defined in this repository.
def cross_session(data, label, subject_id, category_number, batch_size, iteration, lr, momentum, log_interval):
    # Sessions 0 and 1 are the sources; session 2 is the target.
    target_data, target_label = copy.deepcopy(data[2][subject_id]), copy.deepcopy(label[2][subject_id])
    source_data = [copy.deepcopy(data[0][subject_id]), copy.deepcopy(data[1][subject_id])]
    source_label = [copy.deepcopy(label[0][subject_id]), copy.deepcopy(label[1][subject_id])]
    # Stack every source session into one training set, each session exactly once.
    source_data_comb = source_data[0]
    source_label_comb = source_label[0]
    for j in range(1, len(source_data)):
        source_data_comb = np.vstack((source_data_comb, source_data[j]))
        source_label_comb = np.vstack((source_label_comb, source_label[j]))
    source_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data_comb, source_label_comb),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    target_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(target_data, target_label),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    model = DANNet(model=models.DAN(pretrained=False, number_of_category=category_number),
                   source_loader=source_loader,
                   target_loader=target_loader,
                   batch_size=batch_size,
                   iteration=iteration,
                   lr=lr,
                   momentum=momentum,
                   log_interval=log_interval)
    acc = model.train()
    return acc
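
A minimal usage sketch for this example, assuming data and label are nested as data[session][subject] with 3 sessions and 15 subjects (the layout the indexing above implies); all hyperparameter values are placeholders:

accs = []
for sub in range(15):
    acc = cross_session(data, label, subject_id=sub, category_number=3,
                        batch_size=64, iteration=10000, lr=0.001,
                        momentum=0.9, log_interval=10)
    accs.append(acc)
print('mean cross-session accuracy: {:.4f}'.format(sum(accs) / len(accs)))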
Example #2
def __init__(self, model=models.DAN(), source_loader=0, target_loader=0,
             batch_size=64, iteration=10000, lr=0.001, momentum=0.9,
             log_interval=10):
    # Note: the models.DAN() default is built once, when the def is evaluated;
    # device is a module-level torch.device defined elsewhere in the project.
    self.model = model
    self.model.to(device)
    self.source_loader = source_loader
    self.target_loader = target_loader
    self.batch_size = batch_size
    self.iteration = iteration
    self.lr = lr
    self.momentum = momentum
    self.log_interval = log_interval
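
A minimal construction sketch for this initializer; Xs, ys, Xt, yt are hypothetical source/target arrays, and DANNet, models.DAN and utils.CustomDataset are the project's own classes:

source_loader = torch.utils.data.DataLoader(utils.CustomDataset(Xs, ys),
                                            batch_size=64, shuffle=True,
                                            drop_last=True)
target_loader = torch.utils.data.DataLoader(utils.CustomDataset(Xt, yt),
                                            batch_size=64, shuffle=True,
                                            drop_last=True)
net = DANNet(model=models.DAN(pretrained=False, number_of_category=3),
             source_loader=source_loader, target_loader=target_loader,
             batch_size=64, iteration=10000, lr=0.001, momentum=0.9,
             log_interval=10)
acc = net.train()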
Example #3
def cross_subject(data, label, session_id, category_number, batch_size, iteration, lr, momentum, log_interval, bn='none'):
    # Cross-subject within one session: subjects 1-14 are the sources, subject 15 the target.
    # bn selects the feature normalization: 'ele', 'sample', 'global' or 'none'.
    one_session_data, one_session_label = copy.deepcopy(data[session_id]), copy.deepcopy(label[session_id])
    target_data, target_label = one_session_data.pop(), one_session_label.pop()
    source_data, source_label = one_session_data, one_session_label  # already deep-copied above
    source_data_comb = source_data[0]
    source_label_comb = source_label[0]
    for j in range(1, len(source_data)):
        source_data_comb = np.vstack((source_data_comb, source_data[j]))
        source_label_comb = np.vstack((source_label_comb, source_label[j]))
    # Optional normalization of the combined source data and the target data.
    if bn == 'ele':
        source_data_comb = utils.norminy(source_data_comb)
        target_data = utils.norminy(target_data)
    elif bn == 'sample':
        source_data_comb = utils.norminx(source_data_comb)
        target_data = utils.norminx(target_data)
    elif bn == 'global':
        source_data_comb = utils.normalization(source_data_comb)
        target_data = utils.normalization(target_data)
    # 'none' (or any other value) leaves the data unchanged.
    source_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data_comb, source_label_comb),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    target_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(target_data, target_label),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    model = DANNet(model=models.DAN(pretrained=False, number_of_category=category_number),
                   source_loader=source_loader,
                   target_loader=target_loader,
                   batch_size=batch_size,
                   iteration=iteration,
                   lr=lr,
                   momentum=momentum,
                   log_interval=log_interval)
    acc = model.train()
    return acc
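
The bn dispatch in this example can equivalently be written as a lookup table; a small sketch reusing the same utils helpers:

NORMALIZERS = {'ele': utils.norminy, 'sample': utils.norminx,
               'global': utils.normalization}
norm = NORMALIZERS.get(bn)   # None for 'none' and unknown modes
if norm is not None:
    source_data_comb = norm(source_data_comb)
    target_data = norm(target_data)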
Example #4
def cross_subject(data, label, session_id, subject_id, category_number,
                  batch_size, iteration, lr, momentum, log_interval):
    # Leave-one-subject-out (LOSO): subject_id is the target, the other 14 subjects are sources.
    one_session_data, one_session_label = copy.deepcopy(
        data[session_id]), copy.deepcopy(label[session_id])
    train_idxs = list(range(15))
    del train_idxs[subject_id]
    test_idx = subject_id
    # Indexing with a list of indices assumes the per-session data is a NumPy array.
    target_data, target_label = one_session_data[test_idx], one_session_label[test_idx]
    source_data, source_label = copy.deepcopy(
        one_session_data[train_idxs]), copy.deepcopy(
            one_session_label[train_idxs])

    del one_session_label
    del one_session_data

    # Stack the 14 source subjects into one training set.
    source_data_comb = source_data[0]
    source_label_comb = source_label[0]
    for j in range(1, len(source_data)):
        source_data_comb = np.vstack((source_data_comb, source_data[j]))
        source_label_comb = np.vstack((source_label_comb, source_label[j]))
    source_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data_comb, source_label_comb),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    target_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(target_data, target_label),
                                                batch_size=batch_size,
                                                shuffle=True,
                                                drop_last=True)
    model = DANNet(model=models.DAN(pretrained=False,
                                    number_of_category=category_number),
                   source_loader=source_loader,
                   target_loader=target_loader,
                   batch_size=batch_size,
                   iteration=iteration,
                   lr=lr,
                   momentum=momentum,
                   log_interval=log_interval)
    acc = model.train()
    print('Target_subject_id: {}, current_session_id: {}, acc: {}'.format(
        test_idx, session_id, acc))
    return acc
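
A minimal sweep sketch, holding out each of the 15 subjects of one session in turn; hyperparameter values are placeholders:

accs = [cross_subject(data, label, session_id=0, subject_id=sub,
                      category_number=3, batch_size=64, iteration=10000,
                      lr=0.001, momentum=0.9, log_interval=10)
        for sub in range(15)]
print('mean LOSO accuracy:', sum(accs) / len(accs))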
Example #5
                source_loader = torch.utils.data.DataLoader(
                    dataset=utils.CustomDataset(source_data_comb,
                                                source_label_comb),
                    batch_size=batch_size,
                    shuffle=True,
                    drop_last=True)
                target_loader = torch.utils.data.DataLoader(
                    dataset=utils.CustomDataset(target_data, target_label),
                    batch_size=batch_size,
                    shuffle=True,
                    drop_last=True)
                model = DANNet(model=models.DAN(
                    pretrained=False, number_of_category=category_number),
                               source_loader=source_loader,
                               target_loader=target_loader,
                               batch_size=batch_size,
                               iteration=iteration,
                               lr=lr,
                               momentum=momentum,
                               log_interval=log_interval)
                acc = model.train()
                csub.append(acc)

            # Cross-session: for each of the 15 subjects, sessions 1-2 are the sources and session 3 is the target.
            for i in range(15):
                target_data, target_label = copy.deepcopy(
                    data_tmp[2][i]), copy.deepcopy(label[2][i])
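
The incremental np.vstack loop these examples use to merge source subjects can be collapsed into a single call; a standalone sketch with hypothetical shapes:

import numpy as np

source_data = [np.random.rand(100, 310) for _ in range(14)]  # 14 hypothetical source subjects
source_data_comb = np.concatenate(source_data, axis=0)       # same result as the vstack loop
assert source_data_comb.shape == (1400, 310)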
Example #6
def cli_main():
    # torch.autograd.set_detect_anomaly(True)
    pl.seed_everything(1234)

    dataset = customdata.AusData("data/aus_data/D/D230201.mat",
                                 pad=False,
                                 normalise=True)
    train_size = int(0.6 * len(dataset))
    val_size = int(0.2 * len(dataset))
    test_size = int(0.2 * len(dataset))

    # int() floors each split, so the three sizes can undershoot len(dataset)
    # by up to two; hand the remainder out round-robin across the splits.
    sizes = [train_size, val_size, test_size]
    for i in range(len(dataset) - sum(sizes)):
        sizes[i % 3] += 1
    train_size, val_size, test_size = sizes

    train_set, val_set, test_set = torch.utils.data.random_split(
        dataset, [train_size, val_size, test_size])
    tb_logger = pl_loggers.TensorBoardLogger('logs/')
    trainer = pl.Trainer(logger=tb_logger,
                         gpus=1,
                         auto_select_gpus=True,
                         max_epochs=50)
    # Note: the AusData splits above are never used below; the loaders are
    # built over the full Samson dataset.
    dataset = customdata.SamsonDataset("sample_data/samson/samson_1.mat",
                                       "sample_data/samson/end3.mat",
                                       transform=transforms.Compose(
                                           [customdata.ToTensor()]))

    # shape = (610, 340, 103) 103==bands, 9 pixel types
    # dataset = customdata.PaviaUDataset("sample_data/paviaU/PaviaU.mat", transform=transforms.Compose([customdata.ToTensor()]))
    # dataset = customdata.ArtificalDataset((128, 128, 18), 5,
    #                                       transform=transforms.Compose([customdata.ToTensor()]))
    train_loader = DataLoader(dataset,
                              batch_size=256,
                              num_workers=32,
                              shuffle=True)

    # The plotting code below reassembles the predictions into the full
    # 95x95 image, so the test loader must keep the pixel order (no shuffle).
    test_loader = DataLoader(dataset, batch_size=64, num_workers=32)
    val_loader = DataLoader(dataset, batch_size=600, num_workers=32)

    # ------------
    # model
    # ------------
    data = dataset.HCI
    # data = dataset.images
    # noises, cov = hysime.est_noise(data)
    # n_endmembers, b = hysime.hysime(data, noises, cov)
    data = np.transpose(data, (1, 0))

    # Initialise the decoder with 3 endmembers extracted by VCA, each
    # spectrum scaled so its maximum is 1.
    weights, _, _ = vca.vca(data, 3)
    for i in range(3):
        weights[:, i] /= np.max(weights[:, i])
    model = models.DAN(dataset.shape[1],
                       3,
                       noise=0.1,
                       lr=1e-3,
                       weights=torch.from_numpy(weights).float())

    # ------------
    # training
    # ------------
    trainer.fit(model, train_loader, val_loader)
    model.eval()
    # Collect abundance predictions for every pixel, preserving loader order.
    preds = []
    for batch in test_loader:
        preds.append(model.test_custom(batch.float()))
    pixels = torch.cat(preds)
    fig, axs = plt.subplots(1, 3)
    axs = axs.ravel()
    pixels = pixels.detach().numpy().reshape((95, 95, 3), order="F")
    axs[0].imshow(pixels[:, :, ::-1])
    axs[1].imshow(dataset.gt_A)

    # Learned endmember spectra (decoder rows) vs. ground truth (marked '+');
    # VCA recovers endmembers in arbitrary order, hence the manual colour pairing.
    axs[2].plot(model.decoder[0].weight[0, :].detach().numpy(), color="blue")
    axs[2].plot(model.decoder[0].weight[1, :].detach().numpy(), color="red")
    axs[2].plot(model.decoder[0].weight[2, :].detach().numpy(), color="orange")
    axs[2].plot(dataset.end_members[:, 0], color="orange", marker="+")
    axs[2].plot(dataset.end_members[:, 1], color="red", marker="+")
    axs[2].plot(dataset.end_members[:, 2], color="blue", marker="+")

    plt.show()
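
The split-size bookkeeping at the top of cli_main reduces to distributing the rounding remainder round-robin; a standalone sketch with a hypothetical dataset length:

n = 101                                             # hypothetical length
sizes = [int(0.6 * n), int(0.2 * n), int(0.2 * n)]  # int() floors, leaving a remainder
for i in range(n - sum(sizes)):                     # hand it out round-robin
    sizes[i % 3] += 1
assert sum(sizes) == n
train_size, val_size, test_size = sizes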