Example #1
def main(i):
    import argparse

    import torch
    from torch import optim

    from trainer import fit
    import numpy as np

    # load_speech, SpeechDataset, train, and eval are assumed to be
    # project-level helpers defined or imported elsewhere in this module.
    cuda = torch.cuda.is_available()
    # Training settings

    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')

    parser.add_argument('--batch-size',
                        type=int,
                        default=10,
                        metavar='N',
                        help='input batch size for training (default: 10)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')

    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')

    # Writer will output to ./runs/ directory by default

    fold_idx = 4
    gamma = 1.0
    margin = 1.0

    DAsetting = False
    args = parser.parse_args()
    args.seed = 0
    args.use_tensorboard = True
    args.save_model = True
    n_epochs = 200
    startepoch = 0

    folder_name = 'exp1'
    comment = 'deep4' + str(fold_idx) + '_g_' + str(gamma) + '_m_' + str(
        margin)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    # cudnn benchmarking picks algorithms nondeterministically, so it must
    # stay off when deterministic behaviour is requested.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda:0" if use_cuda else "cpu")
    gpuidx = 0
    #kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    from datetime import datetime
    import os
    logging = False

    x_data1, y_data1, sizes1 = load_speech(path='', data_type='o')
    x_data1 = x_data1[:, :, :, 50:]

    x_data2, y_data2, sizes2 = load_speech(path='')
    x_data2 = x_data2[:, :, :, 50:]
    #get subject number
    # y_subj = np.ones_like(y_data1)

    test_subj = 4

    print('test subj:' + str(test_subj))

    # sizes1/sizes2 hold per-subject trial counts, so cumulative sums delimit
    # the rows belonging to the chosen subject in each recording.
    train_idx = np.r_[sum(sizes1[0:test_subj]):sum(sizes1[0:test_subj + 1])]
    test_idx = np.r_[sum(sizes2[0:test_subj]):sum(sizes2[0:test_subj + 1])]

    dataset_train = SpeechDataset(x=x_data1, y=y_data1, idx=train_idx)
    dataset_test = SpeechDataset(x=x_data2, y=y_data2, idx=test_idx)

    # triplet_dataset_train = TripletGiga2(x=x_data, y=y_subj, valtype=valtype, istrain=True, subj=train_subj,
    #                                      trial=trial_train)
    # # triplet_dataset_train2 = TripletGiga2(x=x_data[:,:,:,10:], y=y_subj, valtype=valtype, istrain=True, subj=train_subj,
    # #                                      trial=trial_train)
    # # triplet_dataset_train = triplet_dataset_train1.__add__(triplet_dataset_train2)
    #
    # triplet_dataset_test = TripletGiga2(x=x_data, y=y_subj, valtype=valtype, istrain=False, subj=test_subj,
    #                                    trial=trial_val)

    #

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    ###################################################################################################################
    # make model for metric learning
    # from networks import DWConvNet, basenet,Deep4Net_origin, Deep4Net, Deep4NetWs, EmbeddingDeep4CNN,EmbeddingDeep4CNN_bn, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN, TripletNet_conv_clf

    import get_common as gc
    # from losses import TripletLoss_dev2, TripLoss, ContrastiveLoss_dk
    margin = 1
    dgnet = gc.dgnet(gamma=gamma, margin=margin)
    model = dgnet.model
    if cuda:
        model.cuda(device)

    loss_fn = dgnet.loss_fn
    if cuda and (loss_fn is not None):
        loss_fn.cuda(device)

    optimizer = dgnet.optimizer
    milestones = dgnet.milestones
    scheduler = dgnet.scheduler
    exp_comment = dgnet.exp_comment

    print('____________DANet____________')
    print(model)

    model_save_path = 'model/' + folder_name + '/' + comment + '/'
    # if (args.save_model):
    #     if not os.path.isdir(model_save_path):
    #         os.makedirs(model_save_path)
    #
    # if startepoch > 0:
    #     load_model_path = model_save_path+'danet_'+str(gamma)+'_'+ str(startepoch) + '.pt'
    #     model_save_path = model_save_path +'(cont)'
    # else:
    #     load_model_path = None
    # if load_model_path is not None:
    #     model.load_state_dict(torch.load(load_model_path))

    maxacc = 0
    for epochidx in range(1, 100):
        # fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer, scheduler, epochidx, n_epochs, cuda,gpuidx, log_interval)
        print(epochidx)
        train(args,
              model.clf_net,
              device,
              train_loader,
              optimizer,
              scheduler,
              epoch=epochidx)
        train_loss, train_score, train_acc = eval(args, model.clf_net, device,
                                                  train_loader)
        eval_loss, eval_score, acc = eval(args, model.clf_net, device,
                                          test_loader)
        if acc > maxacc:
            maxacc = acc

        # if args.use_tensorboard:
        #     writer.add_scalar('Train/Loss', np.mean(train_loss)/args.batch_size, epochidx)
        #     writer.add_scalar('Train/Acc', np.mean(train_score)/args.batch_size, epochidx)
        #     writer.add_scalar('Eval/Loss', np.mean(eval_loss)/args.batch_size, epochidx)
        #     writer.add_scalar('Eval/Acc', np.mean(eval_score)/args.batch_size, epochidx)
        #     writer.close()
        # if args.save_model:
        #     torch.save(model.state_dict(), model_save_path + 'danet_'+str(gamma)+'_'+ str(epochidx) + '.pt')

    train(args,
          model.clf_net,
          device,
          train_loader,
          optimizer,
          scheduler,
          epoch=epochidx)

    # Fine-tuning stage: split the held-out subject's data 70/30 into an
    # adaptation set and a final test set.
    train_size = int(0.7 * len(dataset_test))
    test_size = len(dataset_test) - train_size
    dataset_train, dataset_test = torch.utils.data.random_split(
        dataset_test, [train_size, test_size])
    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)

    dgnet_ft = gc.dgnet(gamma=gamma, margin=margin)
    model_ft = dgnet_ft.model
    if cuda:
        model_ft.cuda(device)

    loss_fn = dgnet_ft.loss_fn
    if cuda and (loss_fn is not None):
        loss_fn.cuda(device)

    milestones = dgnet_ft.milestones
    scheduler = dgnet_ft.scheduler
    exp_comment = dgnet_ft.exp_comment

    model_ft.clf_net.embedding_net = model.clf_net.embedding_net
    for param in model_ft.clf_net.embedding_net.parameters():
        param.requires_grad = False
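    # model_ft reuses the embedding network trained above; with its weights
    # frozen, fine-tuning only updates the classifier head.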

    # Pass only the parameters that still require gradients (the classifier
    # head) to the optimizer; the frozen embedding net is excluded.
    optimizer = optim.SGD(filter(lambda p: p.requires_grad,
                                 model_ft.parameters()),
                          lr=0.0001,
                          momentum=0.9,
                          weight_decay=0.0005)

    for epochidx in range(1, 100):
        # fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer, scheduler, epochidx, n_epochs, cuda,gpuidx, log_interval)
        print(epochidx)
        train(args,
              model_ft.clf_net,
              device,
              train_loader,
              optimizer,
              scheduler,
              epoch=epochidx)
        train_loss, train_score, train_acc = eval(args, model_ft.clf_net,
                                                  device, train_loader)
        eval_loss, eval_score, acc = eval(args, model_ft.clf_net, device,
                                          test_loader)
        if acc > maxacc:
            maxacc = acc

    return maxacc
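
The train and eval helpers called above come from the project and are not shown. The following is a minimal sketch of what they might look like, matching Example #1's call signatures (eval returning a loss, a score, and an accuracy); the NLL loss and the return conventions are assumptions based on the LogSoftmax head visible in the commented-out code of Example #4, not the project's actual code. Note that Examples #2 through #4 unpack only two values from eval, so the real helper likely differs between files.

import torch
import torch.nn.functional as F


def train(args, model, device, train_loader, optimizer, scheduler, epoch):
    # One supervised epoch; the loss choice is an assumption (see above).
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('epoch {} batch {} loss {:.4f}'.format(
                epoch, batch_idx, loss.item()))
    if scheduler is not None:
        scheduler.step()


def eval(args, model, device, loader):
    # Returns (summed loss, correct count, accuracy), mirroring the
    # three-value unpacking used in Example #1.
    model.eval()
    total_loss, correct = 0.0, 0
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            total_loss += F.nll_loss(output, target, reduction='sum').item()
            correct += output.argmax(dim=1).eq(target).sum().item()
    return total_loss, correct, correct / len(loader.dataset)
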
Example #2
def main():
    import argparse

    import torch

    from torch.utils.tensorboard import SummaryWriter
    from trainer import fit
    import numpy as np

    # load_smt, GigaDataset, TripletGiga2, TripletGigaDA, and eval are assumed
    # to be project-level helpers defined or imported elsewhere in this module.
    cuda = torch.cuda.is_available()
    # Training settings

    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')

    parser.add_argument('--batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')

    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')

    # Writer will output to ./runs/ directory by default

    fold_idx = 4
    gamma = 1.0
    margin = 1.0

    DAsetting = False
    args = parser.parse_args()
    args.seed = 0
    args.use_tensorboard = True
    args.save_model = True
    n_epochs = 200
    startepoch = 0

    folder_name = 'exp1'
    comment = 'deep4' + str(fold_idx) + '_g_' + str(gamma) + '_m_' + str(
        margin)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    # cudnn benchmarking must stay off for deterministic runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda:0" if use_cuda else "cpu")
    gpuidx = 0
    #kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    from datetime import datetime
    import os
    logging = False

    x_data, y_data = load_smt()
    x_data = x_data[:, :, :, 100:]
    #get subject number
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)
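    # Label encoding: subject i with class y (0 or 1) maps to the unique value
    # 2 * i + y (e.g. subject 3, class 1 -> 7), so the triplet datasets below
    # can recover both subject identity and class from one array.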
    #y_subj = np.concatenate([y_data,y_subj],axis=1)

    # plt.imshow(x_data[100,0,:,:])
    # For classification data
    valtype = 'subj'
    # if x_data.shape[2] != 60:
    #     x_data = x_data[:,:,2:,:]
    # plt.imshow(x_data[1000,0,:,:])
    # #subj - 0-27 train
    # train_subj1 = np.r_[0:27]
    # train_subj2 = np.r_[0:27]+54
    #
    # test_subj = np.r_[27:54,54+27:108]

    #chidx = np.r_[7:11, 12:15, 17:21, 32:41]  # channel selection condition
    # chidx = np.r_[2:56, 60:62]
    # x_data = x_data[:,:,chidx,:]

    # For Domain adaptation setting
    if DAsetting:
        # test_subj = np.r_[fold_idx * 9:fold_idx * 9 + 9, fold_idx * 9 + 54:fold_idx * 9 + 9 + 54]
        test_subj_id = 39
        test_subj = np.r_[test_subj_id:test_subj_id + 1]
        train_subj1 = np.setxor1d(np.r_[0:108], test_subj)
        train_subj2 = test_subj

        n_targets = 60
        trial_s = (0, 200)
        trial_t = (0, n_targets)

        trial_val = (n_targets, 200)

        # dataset_train1 = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True,subj=train_subj1,trial=trial_s)
        dataset_train = GigaDataset(x=x_data,
                                    y=y_data,
                                    valtype=valtype,
                                    istrain=True,
                                    subj=train_subj2,
                                    trial=trial_t)
        # dataset_train = dataset_train1.__add__(dataset_train2)
        dataset_test = GigaDataset(x=x_data,
                                   y=y_data,
                                   valtype=valtype,
                                   istrain=False,
                                   subj=test_subj,
                                   trial=trial_val)

        triplet_dataset_train = TripletGigaDA(x=x_data,
                                              y=y_subj,
                                              valtype=valtype,
                                              istrain=True,
                                              subj_s=train_subj1,
                                              trial_s=trial_s,
                                              subj_t=train_subj2,
                                              trial_t=trial_t)

        # triplet_dataset_train2 = TripletGiga2(x=x_data, y=y_subj, valtype=valtype, istrain=True, subj=train_subj2, trial=trial_t)
        # triplet_dataset_train = triplet_dataset_train1.__add__(triplet_dataset_train2)

        triplet_dataset_test = TripletGigaDA(x=x_data,
                                             y=y_subj,
                                             valtype=valtype,
                                             istrain=True,
                                             subj_s=train_subj1,
                                             trial_s=trial_s,
                                             subj_t=test_subj,
                                             trial_t=trial_val)

    else:  #DG setting
        # test_subj = np.r_[fold_idx*9:fold_idx*9+9,fold_idx*9+54:fold_idx*9+9+54]
        # train_subj = test_subj
        # trial_train = (0, 30)
        # trial_val = (30, 200)
        #
        # bci_excellent = np.r_[43, 20, 27, 1, 28, 32, 35, 44, 36, 2]
        # bci_excellent = np.concatenate([bci_excellent, bci_excellent + 54])

        test_subj = np.r_[fold_idx * 9:fold_idx * 9 + 9,
                          fold_idx * 9 + 54:fold_idx * 9 + 9 + 54]
        # train_subj = np.setdiff1d(bci_excellent, test_subj)
        # bci_excellent.sort()

        print('test subj:' + str(test_subj))
        train_subj = np.setdiff1d(np.r_[0:108], test_subj)

        trial_train = (0, 200)
        trial_val = (0, 200)

        dataset_train = GigaDataset(x=x_data,
                                    y=y_data,
                                    valtype=valtype,
                                    istrain=True,
                                    subj=train_subj,
                                    trial=trial_train)
        dataset_test = GigaDataset(x=x_data,
                                   y=y_data,
                                   valtype=valtype,
                                   istrain=False,
                                   subj=test_subj,
                                   trial=trial_val)

        triplet_dataset_train = TripletGiga2(x=x_data,
                                             y=y_subj,
                                             valtype=valtype,
                                             istrain=True,
                                             subj=train_subj,
                                             trial=trial_train)
        # triplet_dataset_train2 = TripletGiga2(x=x_data[:,:,:,10:], y=y_subj, valtype=valtype, istrain=True, subj=train_subj,
        #                                      trial=trial_train)
        # triplet_dataset_train = triplet_dataset_train1.__add__(triplet_dataset_train2)

        triplet_dataset_test = TripletGiga2(x=x_data,
                                            y=y_subj,
                                            valtype=valtype,
                                            istrain=False,
                                            subj=test_subj,
                                            trial=trial_val)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)
    triplet_train_loader = torch.utils.data.DataLoader(
        triplet_dataset_train, batch_size=args.batch_size, shuffle=True)
    triplet_test_loader = torch.utils.data.DataLoader(
        triplet_dataset_test, batch_size=args.batch_size, shuffle=False)

    ###################################################################################################################
    # make model for metric learning
    # from networks import DWConvNet, basenet,Deep4Net_origin, Deep4Net, Deep4NetWs, EmbeddingDeep4CNN,EmbeddingDeep4CNN_bn, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN, TripletNet_conv_clf

    import get_common as gc
    from losses import TripletLoss_dev2, TripLoss, ContrastiveLoss_dk

    dgnet = gc.dgnet(gamma=gamma)
    model = dgnet.model
    if cuda:
        model.cuda(device)

    loss_fn = dgnet.loss_fn
    if cuda:
        loss_fn = loss_fn.cuda(device)

    log_interval = 10

    optimizer = dgnet.optimizer
    milestones = dgnet.milestones
    scheduler = dgnet.scheduler

    print('____________DANet____________')
    print(model)

    model_save_path = 'model/' + folder_name + '/' + comment + '/'
    if (args.save_model):
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)

    if args.use_tensorboard:
        writer = SummaryWriter(comment=comment)
        writer.add_text('optimizer', str(optimizer))
        writer.add_text('scheduler', str(milestones))
        writer.add_text('model_save_path', model_save_path)
        writer.add_text('model', str(model))
        writer.flush()  # keep the writer open; it is reused in the epoch loop

    if startepoch > 0:
        load_model_path = model_save_path + 'danet_' + str(gamma) + '_' + str(
            startepoch) + '.pt'
        model_save_path = model_save_path + '(cont)'
    else:
        load_model_path = None
    if load_model_path is not None:
        model.load_state_dict(torch.load(load_model_path))

    for epochidx in range(1, 100):
        fit(triplet_train_loader, triplet_test_loader, model, loss_fn,
            optimizer, scheduler, epochidx, n_epochs, cuda, gpuidx,
            log_interval)
        print(epochidx)
        train_loss, train_score = eval(args, model.clf_net, device,
                                       train_loader)
        eval_loss, eval_score = eval(args, model.clf_net, device, test_loader)

        if args.use_tensorboard:
            writer.add_scalar('Train/Loss',
                              np.mean(train_loss) / args.batch_size, epochidx)
            writer.add_scalar('Train/Acc',
                              np.mean(train_score) / args.batch_size, epochidx)
            writer.add_scalar('Eval/Loss',
                              np.mean(eval_loss) / args.batch_size, epochidx)
            writer.add_scalar('Eval/Acc',
                              np.mean(eval_score) / args.batch_size, epochidx)
            writer.flush()  # flush instead of close so the writer stays usable
        if args.save_model:
            torch.save(
                model.state_dict(), model_save_path + 'danet_' + str(gamma) +
                '_' + str(epochidx) + '.pt')
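
The DG branch above always holds out nine subjects from each of the two 54-subject sessions. The indexing is compact enough to be worth isolating; dg_split below is an illustrative helper, not part of the project, but it reproduces the arithmetic used above:

import numpy as np


def dg_split(fold_idx, subj_per_session=54, fold_size=9):
    # Test fold: the same nine subjects drawn from both recording sessions;
    # every other subject goes to training.
    lo = fold_idx * fold_size
    test_subj = np.r_[lo:lo + fold_size,
                      lo + subj_per_session:lo + fold_size + subj_per_session]
    train_subj = np.setdiff1d(np.r_[0:2 * subj_per_session], test_subj)
    return train_subj, test_subj


train_subj, test_subj = dg_split(fold_idx=4)
print(test_subj)  # subjects 36..44 and 90..98
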
Example #3
def experiment(args):
    import os

    import numpy as np
    import torch
    from torch.utils.tensorboard import SummaryWriter
    from trainer import fit

    # load_smt, GigaDataset, TripletGiga4, and eval are assumed to be
    # project-level helpers defined or imported elsewhere in this module.
    cuda = torch.cuda.is_available()

    fold_idx = args.fold_idx
    startepoch = 0
    folder_name = args.folder_name
    comment = args.comment

    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    # cudnn benchmarking must stay off for deterministic runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = args.device

    #data load
    x_data, y_data = load_smt(fs=250)
    # get subject number
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)

    valtype = 'subj'

    test_subj = np.r_[fold_idx * 9:fold_idx * 9 + 9,
                      fold_idx * 9 + 54:fold_idx * 9 + 9 + 54]
    print('test subj:' + str(test_subj))
    train_subj = np.setdiff1d(np.r_[0:108], test_subj)

    trial_train = (0, 200)
    trial_val = (0, 200)

    dataset_train = GigaDataset(x=x_data,
                                y=y_data,
                                valtype=valtype,
                                istrain=True,
                                subj=train_subj,
                                trial=trial_train)
    dataset_test = GigaDataset(x=x_data,
                               y=y_data,
                               valtype=valtype,
                               istrain=False,
                               subj=test_subj,
                               trial=trial_val)

    triplet_dataset_train = TripletGiga4(x=x_data,
                                         y=y_subj,
                                         valtype=valtype,
                                         istrain=True,
                                         subj=train_subj,
                                         trial=trial_train)

    triplet_dataset_test = TripletGiga4(x=x_data,
                                        y=y_subj,
                                        valtype=valtype,
                                        istrain=False,
                                        subj=test_subj,
                                        trial=trial_val)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)
    triplet_train_loader = torch.utils.data.DataLoader(
        triplet_dataset_train, batch_size=args.batch_size, shuffle=True)
    triplet_test_loader = torch.utils.data.DataLoader(
        triplet_dataset_test, batch_size=args.batch_size, shuffle=False)

    #create model
    import get_common as gc
    dgnet = gc.dgnet(gamma=args.gamma, margin=args.margin)
    model = dgnet.model
    if cuda:
        model.cuda(device)

    loss_fn = dgnet.loss_fn
    if cuda and (loss_fn is not None):
        loss_fn.cuda(device)

    optimizer = dgnet.optimizer
    milestones = dgnet.milestones
    scheduler = dgnet.scheduler
    exp_comment = dgnet.exp_comment

    print(model)

    model_save_path = 'model/' + folder_name + '/' + comment + '/'
    if (args.save_model):
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)

    if args.use_tensorboard:
        writer = SummaryWriter(log_dir=args.log_dir)
        writer.add_text('exp', exp_comment)
        writer.add_text('optimizer', str(optimizer))
        writer.add_text('scheduler', str(milestones))
        writer.add_text('model_save_path', model_save_path)
        writer.add_text('model', str(model))
        model_save_path = writer.log_dir
        writer.flush()  # keep the writer open; it is reused in the epoch loop

    if (args.save_model):
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)

    # if startepoch > 0:
    #     load_model_path = model_save_path + 'danet_' + str(args.gamma) + '_' + str(startepoch) + '.pt'
    #     model_save_path = model_save_path + '(cont)'
    # else:
    load_model_path = None
    # Path to an existing checkpoint, if resuming, e.g.:
    # load_model_path = "C:\\Users\\Starlab\\PycharmProjects\\csdg\\exp0719\\Sep25_01-59-47_DESKTOP-186GIONsubj_sim_g_0.7_m_1.0danet_0.7_99.pt"
    if load_model_path is not None:
        model.load_state_dict(torch.load(load_model_path))

    acc_all = np.empty((1, 18))  # placeholder first row, removed at the end
    best_acc = 0
    for epochidx in range(1, args.epochs):
        print(epochidx)
        fit(triplet_train_loader,
            triplet_test_loader,
            model,
            loss_fn,
            optimizer,
            scheduler,
            epochidx,
            args.epochs,
            cuda,
            args.gpuidx,
            log_interval=10)
        train_loss, train_score = eval(args, model.clf_net, device,
                                       train_loader)
        eval_loss, eval_score = eval(args, model.clf_net, device, test_loader)

        eval_temp = np.array(eval_score)
        eval_temp = eval_temp.reshape(4, 18)  # one fold: 9 subjects x 2 sessions = 18
        acc = eval_temp.mean(0) / args.batch_size
        acc_m = acc.mean(0)
        if acc_m > best_acc:
            best_acc = acc_m
        print("highest acc : ", best_acc)

        acc_all = np.vstack([acc_all, acc])
        np.save('[DG]acc_all_' + str(args.fold_idx), acc_all)

        if args.use_tensorboard:
            for subj in range(18):
                writer.add_scalar(
                    'eachsubj/' + str(subj),
                    np.sum(eval_score[subj * 2:subj * 2 + 2]) / 200, epochidx)
            writer.add_scalar('Train/Loss',
                              np.mean(train_loss) / args.batch_size, epochidx)
            writer.add_scalar('Train/Acc',
                              np.mean(train_score) / args.batch_size, epochidx)
            writer.add_scalar('Eval/Loss',
                              np.mean(eval_loss) / args.batch_size, epochidx)
            writer.add_scalar('Eval/Acc',
                              np.mean(eval_score) / args.batch_size, epochidx)
            writer.flush()  # flush instead of close so the writer stays usable
        if args.save_model:
            torch.save(
                model.state_dict(), model_save_path + 'danet_' +
                str(args.gamma) + '_' + str(epochidx) + '.pt')
    acc_all = np.delete(acc_all, 0, axis=0)  # drop the placeholder first row
    return acc_all
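
Checkpoints in these examples are plain state_dicts saved once per epoch under a gamma- and epoch-indexed file name. A minimal save/load sketch with a stand-in model follows; nn.Linear is only a placeholder for dgnet's model, and the map_location usage mirrors the commented-out load in Example #4:

import torch
from torch import nn


def save_checkpoint(model, save_dir, gamma, epoch):
    # Same naming scheme as the examples: danet_<gamma>_<epoch>.pt
    torch.save(model.state_dict(), f'{save_dir}danet_{gamma}_{epoch}.pt')


def load_checkpoint(model, path, device='cpu'):
    # map_location lets a checkpoint written on one GPU be loaded onto
    # another device (or the CPU).
    model.load_state_dict(torch.load(path, map_location=device))


model = nn.Linear(4, 2)  # placeholder for the real network
save_checkpoint(model, './', 0.7, 99)
load_checkpoint(model, './danet_0.7_99.pt')
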
Example #4
def main():
    import argparse

    import torch

    from trainer import fit
    import numpy as np

    # load_bcic, BCICDataset, TripletBCIC, and eval are assumed to be
    # project-level helpers defined or imported elsewhere in this module.
    cuda = torch.cuda.is_available()
    # Training settings

    parser = argparse.ArgumentParser(
        description='cross subject domain adaptation')

    parser.add_argument('--batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs',
                        type=int,
                        default=100,
                        metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')

    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=True,
                        help='For Saving the current Model')

    # Writer will output to ./runs/ directory by default

    fold_idx = 0
    gamma = 0.7
    margin = 1.0

    DAsetting = False
    args = parser.parse_args()
    args.seed = 0
    args.use_tensorboard = True
    args.save_model = True
    n_epochs = 200
    startepoch = 0

    folder_name = 'exp2'
    comment = '22ch_deep4' + str(fold_idx) + '_g_' + str(gamma) + '_m_' + str(
        margin)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    # cudnn benchmarking must stay off for deterministic runs.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    gpuidx = 0
    #kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    from datetime import datetime
    import os
    logging = False

    x_data, y_data = load_bcic(fs=250)
    y_subj = np.zeros([9, 576])
    for i in range(9):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(9, 576) + y_subj
    y_subj = y_subj.reshape(9 * 576)

    valtype = 'subj'
    # if x_data.shape[2] != 60:

    test_subj = np.r_[2]
    # train_subj = np.setdiff1d(bci_excellent, test_subj)
    # bci_excellent.sort()

    print('test subj:' + str(test_subj))
    train_subj = np.setdiff1d(np.r_[0:9], test_subj)

    trial_train = (0, 576)
    trial_val = (0, 576)

    dataset_train = BCICDataset(x=x_data,
                                y=y_data,
                                valtype=valtype,
                                istrain=True,
                                subj=train_subj,
                                trial=trial_train)
    dataset_test = BCICDataset(x=x_data,
                               y=y_data,
                               valtype=valtype,
                               istrain=False,
                               subj=test_subj,
                               trial=trial_val)

    triplet_dataset_train = TripletBCIC(x=x_data,
                                        y=y_data,
                                        valtype=valtype,
                                        istrain=True,
                                        subj=train_subj,
                                        trial=trial_train)
    triplet_dataset_test = TripletBCIC(x=x_data,
                                       y=y_data,
                                       valtype=valtype,
                                       istrain=False,
                                       subj=test_subj,
                                       trial=trial_val)

    train_loader = torch.utils.data.DataLoader(dataset_train,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test,
                                              batch_size=args.batch_size,
                                              shuffle=False)
    triplet_train_loader = torch.utils.data.DataLoader(
        triplet_dataset_train, batch_size=args.batch_size, shuffle=True)
    triplet_test_loader = torch.utils.data.DataLoader(
        triplet_dataset_test, batch_size=args.batch_size, shuffle=False)

    ###################################################################################################################
    # make model for metric learning
    # from networks import DWConvNet, basenet,Deep4Net_origin, Deep4Net, Deep4NetWs, EmbeddingDeep4CNN,EmbeddingDeep4CNN_bn, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN, TripletNet_conv_clf

    import get_common as gc
    from losses import TripletLoss_dev2, TripLoss, ContrastiveLoss_dk

    dgnet = gc.dgnet(gamma=gamma)
    model = dgnet.model
    if cuda:
        model.cuda(device)

    loss_fn = dgnet.loss_fn
    if cuda:
        loss_fn = loss_fn.cuda(device)

    log_interval = 10

    optimizer = dgnet.optimizer
    milestones = dgnet.milestones
    scheduler = dgnet.scheduler

    print('____________DANet____________')
    print(model)
    #
    # model_save_path = 'model/'+folder_name+'/'+comment+'/'
    # if (args.save_model):
    #     if not os.path.isdir(model_save_path):
    #         os.makedirs(model_save_path)
    #
    # if args.use_tensorboard:
    #     writer = SummaryWriter(comment=comment)
    #     writer.add_text('optimizer', str(optimizer))
    #     writer.add_text('scheduler', str(milestones))
    #     writer.add_text('model_save_path', model_save_path)
    #     writer.add_text('model', str(model))
    #     writer.close()

    load_model_path = r'C:\Users\dk\PycharmProjects\csdg_exp2\model\exp3_22\danet_0.7_99.pt'
    # if startepoch > 0:
    #     load_model_path = model_save_path+'danet_'+str(gamma)+'_'+ str(startepoch) + '.pt'
    #     model_save_path = model_save_path +'(cont)'
    # else:
    #     load_model_path = None
    # if load_model_path is not None:
    # model.load_state_dict(torch.load(load_model_path,map_location='cuda:0'))
    #
    # for param in model.clf_net.parameters():
    #     param.requires_grad = False
    #
    #
    # model.clf_net.clf= nn.Sequential(nn.Linear(model.clf_net.embedding_net.num_hidden, 4),
    #                              nn.Dropout(),
    #                              nn.LogSoftmax(dim=1)).cuda()

    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

    # optimizer = optim.Adam(model.parameters(),lr=0.01)

    for epochidx in range(1, 200):
        fit(triplet_train_loader, triplet_test_loader, model, loss_fn,
            optimizer, scheduler, epochidx, n_epochs, cuda, gpuidx,
            log_interval)
        print(epochidx)

        # train(args, model.clf_net, device, train_loader, optimizer, scheduler)
        train_loss, train_score = eval(args, model.clf_net, device,
                                       train_loader)
        eval_loss, eval_score = eval(args, model.clf_net, device, test_loader)