def main():
    import argparse
    import os
    from datetime import datetime

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    from torch.utils.tensorboard import SummaryWriter  # assumed writer backend; tensorboardX also fits this usage

    from trainer import fit
    # GigaDataset, TripletGiga, TripletGiga2, load_smt, eval, train and the plot_*
    # helpers are expected to be provided at module scope by this project.

    cuda = torch.cuda.is_available()

    # Training settings
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='for saving the current model')

    # Experiment configuration; the SummaryWriter outputs to ./runs/ by default.
    fold_idx = 4
    gamma = 1.0
    margin = 1.0
    DAsetting = False

    args = parser.parse_args()
    args.seed = 0
    args.use_tensorboard = True
    args.save_model = True

    n_epochs = 100
    folder_name = 'exp7_deep100'
    comment = 'w/bn fold_' + str(fold_idx) + '_g_' + str(gamma) + '_m_' + str(margin)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    # Note: benchmark=True trades reproducibility for speed and works against
    # deterministic=True above; set it to False for fully deterministic runs.
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda" if use_cuda else "cpu")
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    loging = False

    x_data, y_data = load_smt()

    # Build joint (subject, class) labels: subject i contributes offset 2*i,
    # so label 2*i + c encodes subject i with class c.
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)
    # y_subj = np.concatenate([y_data, y_subj], axis=1)

    # For classification data
    valtype = 'subj'

    # if x_data.shape[2] != 60:
    #     x_data = x_data[:, :, 2:, :]
    # plt.imshow(x_data[1000, 0, :, :])

    # subjects 0-27 train
    # train_subj1 = np.r_[0:27]
    # train_subj2 = np.r_[0:27] + 54
    # test_subj = np.r_[27:54, 54 + 27:108]

    # chidx = np.r_[7:11, 12:15, 17:21, 32:41]  # experimental channel subset
    # chidx = np.r_[2:56, 60:62]
    # x_data = x_data[:, :, chidx, :]

    if DAsetting:  # domain adaptation setting
        train_subj1 = np.r_[27:54]
        train_subj2 = np.r_[27:54] + 54
        test_subj = np.r_[0:27, 54 + 0:54 + 27]
        trial_s = (0, 200)
        trial_t = (0, 200)
        trial_val = (0, 200)

        dataset_train1 = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                     istrain=True, subj=train_subj1, trial=trial_s)
        dataset_train2 = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                     istrain=True, subj=train_subj2, trial=trial_t)
        dataset_train = dataset_train1 + dataset_train2
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                   istrain=False, subj=test_subj, trial=trial_val)

        triplet_dataset_train1 = TripletGiga(x=x_data, y=y_subj, valtype=valtype,
                                             istrain=True, subj=train_subj1, trial=trial_s)
        triplet_dataset_train2 = TripletGiga(x=x_data, y=y_subj, valtype=valtype,
                                             istrain=True, subj=train_subj2, trial=trial_t)
        triplet_dataset_train = triplet_dataset_train1 + triplet_dataset_train2
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                           istrain=False, subj=test_subj, trial=trial_val)
    else:  # domain generalization setting: hold out 9 subjects per session as the test fold
        test_subj = np.r_[fold_idx * 9:fold_idx * 9 + 9,
                          fold_idx * 9 + 54:fold_idx * 9 + 9 + 54]
        print('test subj:' + str(test_subj))
        train_subj = np.setxor1d(np.r_[0:108], test_subj)
        trial_train = (0, 200)
        trial_val = (0, 200)

        dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                    istrain=True, subj=train_subj, trial=trial_train)
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                   istrain=False, subj=test_subj, trial=trial_val)
        triplet_dataset_train = TripletGiga2(x=x_data, y=y_subj, valtype=valtype,
                                             istrain=True, subj=train_subj, trial=trial_train)
        triplet_dataset_test = TripletGiga2(x=x_data, y=y_subj, valtype=valtype,
                                            istrain=False, subj=test_subj, trial=trial_val)

    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False)
    triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train, batch_size=args.batch_size, shuffle=True)
    triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test, batch_size=args.batch_size, shuffle=False)

    ###################################################################################################################
    # Build the model for metric learning
    from networks import basenet, Deep4Net, EmbeddingDeep4CNN, EmbeddingDeep4CNN_bn, \
        TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN
    from losses import TripletLoss_dev2, TripLoss

    embedding_net = Deep4Net()
    print(embedding_net)
    model = TripletNet(embedding_net)
    # exp3-1: one more FC layer
    # model.fc = nn.Sequential(
    #     nn.Linear(model.num_hidden, 128),
    #     nn.ReLU(),
    #     nn.Dropout(),
    #     nn.Linear(128, 2))
    if cuda:
        model.cuda()
    loss_fn = TripletLoss_dev2(margin, gamma).cuda()
    log_interval = 10

    ##########################################################
    # optimizer = optim.Adam(model.parameters(), lr=0.01)
    # scheduler = lr_scheduler.StepLR(optimizer, 10, gamma=1, last_epoch=-1)

    # exp1: 62 channels, folds 0-5
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # scheduler = lr_scheduler.StepLR(optimizer, 5, gamma=0.5, last_epoch=-1)

    # exp2: 20 channels around the motor area; training failed to fit at both
    # g=0.7 and g=1.0, so trying a stronger learning rate than before.
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # scheduler = lr_scheduler.StepLR(optimizer, 5, gamma=1.0, last_epoch=-1)

    # exp4, exp5
    optimizer = optim.SGD(model.parameters(), lr=0.005 / gamma, momentum=0.9)
    scheduler = lr_scheduler.StepLR(optimizer, 5, gamma=0.8, last_epoch=-1)  # decaying the LR too quickly seems to underfit

    # exp5
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # scheduler = lr_scheduler.StepLR(optimizer, 10, gamma=0.5, last_epoch=-1)

    # Model used for validation: embedding + fc head + log-softmax
    evalmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)

    print('____________DANet____________')
    print(model)

    # Saving / logging setup
    if args.save_model:
        model_save_path = 'model/' + folder_name + '/' + comment + '/'
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)
    if loging:
        fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
        f = open(fname, 'w')
    if args.use_tensorboard:
        writer = SummaryWriter(comment=comment)

    # load_model_path = 'C:\\Users\dk\PycharmProjects\giga_cnn\model\deep100_negsubj\\fold_0_g_0.7\danet_0.7_49.pt'
    # other old checkpoints: 'C:\\Users\dk\PycharmProjects\giga_cnn\구모델\\clf_83_8.pt', 'clf_29.pt', 'triplet_mg26.pt', 'clf_triplet2_5.pt', 'triplet_31.pt'
    # load_model_path = 'C:\\Users\dk\PycharmProjects\giga_cnn\model\exp6_basenet\\fold_0_g_0.6\danet_0.6_86.pt'
    load_model_path = None
    if load_model_path is not None:
        model.load_state_dict(torch.load(load_model_path))

    for epochidx in range(n_epochs):
        fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer,
            scheduler, epochidx, n_epochs, cuda, log_interval)
        print(epochidx)
        train_loss, train_score = eval(args, evalmodel, device, train_loader)
        eval_loss, eval_score = eval(args, evalmodel, device, test_loader)
        if args.use_tensorboard:
            writer.add_scalar('Train/Loss', np.mean(train_loss) / 100, epochidx)
            writer.add_scalar('Train/Acc', np.mean(train_score) / 100, epochidx)
            writer.add_scalar('Eval/Loss', np.mean(eval_loss) / 100, epochidx)
            writer.add_scalar('Eval/Acc', np.mean(eval_score) / 100, epochidx)
            writer.close()  # effectively a per-epoch flush; the writer reopens on the next add_scalar
        if args.save_model:
            torch.save(model.state_dict(),
                       model_save_path + 'danet_' + str(gamma) + '_' + str(epochidx) + '.pt')
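# The loop above relies on an `eval(args, model, device, loader)` helper defined
# elsewhere in this project (it shadows the builtin eval). A minimal sketch that is
# consistent with how its return values are used here -- lists of per-batch losses and
# percent accuracies, later averaged and divided by 100 -- might look like the function
# below. The name `eval_sketch` and its internals are illustrative assumptions, not the
# project's actual implementation.
def eval_sketch(args, model, device, loader):
    import torch
    import torch.nn.functional as F
    model.eval()
    losses, scores = [], []
    with torch.no_grad():
        for data, target in loader:
            data, target = data.to(device), target.to(device).long()
            output = model(data)  # log-probabilities from the LogSoftmax head
            losses.append(F.nll_loss(output, target).item())
            pred = output.argmax(dim=1)
            scores.append(100.0 * pred.eq(target).float().mean().item())
    return losses, scores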
def main():
    import argparse
    import os
    from datetime import datetime

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler

    from trainer import fit

    cuda = torch.cuda.is_available()

    # Training settings (argparse template inherited from the PyTorch MNIST example)
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='for saving the current model')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    loging = False
    ismultitask = False
    loso = False

    if args.save_model:
        model_save_path = 'model/triplet/'
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)
    if loging:
        fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
        f = open(fname, 'w')

    x_data, y_data = load_smt()

    # Joint (subject, class) labels: 2*i + c for subject i, class c.
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)
    # nonbciilli = np.s_[0, 1, 2, 4, 5, 8, 16, 17, 18, 20, 21, 27, 28, 29, 30, 32, 35, 36, 38, 42, 43, 44, 51]

    valtype = 'sess'

    if valtype == 'loso':
        # Leave-one-subject-out: train a base CNN per held-out subject.
        for subj in range(0, 54):
            model = Deep4CNN(ismult=ismultitask).to(device)  # Deep4CNN is expected from the project's networks module
            # model.load_state_dict(torch.load(model_save_path + "J_" + str(subj) + 'basecnn.pt'))
            optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
            optimizer_fine = optim.SGD(model.parameters(), lr=0.005, momentum=args.momentum)

            dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1, subj=subj)
            train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size,
                                                       shuffle=True, **kwargs)
            dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=subj)
            test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size,
                                                      shuffle=False, **kwargs)

            # dataset_fine = GigaDataset_LOSO(x=x_data, y=y_data, fine=True, istrain=True, sess=2, subj=subj)
            # fine_loader = torch.utils.data.DataLoader(dataset_fine, batch_size=args.batch_size, shuffle=True, **kwargs)

            for epoch in range(1, args.epochs + 1):
                train(args, model, device, train_loader, optimizer, epoch)
                print("joint-train")  # LOSO joint training
                j_loss, j_score = eval(args, model, device, test_loader)
                if epoch > 30 and args.save_model:
                    torch.save(model.state_dict(),
                               model_save_path + "model_" + str(subj) + "_" + str(epoch) + '.pt')

            # Fine-tuning (disabled)
            # for epoch in range(1, 10):
            #     train_mt(args, model, device, fine_loader, optimizer_fine, epoch)
            #     print("fine-tuning")
            #     f_loss, f_score = eval(args, model, device, test_loader)

            if args.save_model:
                torch.save(model.state_dict(), model_save_path + "F_" + str(subj) + 'basecnn.pt')
            if loging:
                f = open(fname, 'a')
                f.write(str(subj) + " " + "jl : " + str(j_loss) + " " + str(j_score) + '\n')
                f.close()

    elif valtype == 'sess':
        from networks import EmbeddingDeep4CNN, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet
        from losses import TripletLoss_dev2

        # Build the model for metric learning
        margin = 1
        embedding_net = EmbeddingDeep4CNN()
        # clf_net = nn.Sequential(EmbeddingDeep4CNN(), nn.Linear(1000, 2), nn.Dropout(p=1), nn.LogSoftmax(dim=1))
        print(embedding_net)
        model = TripletNet(embedding_net)
        if cuda:
            model.cuda()
        loss_fn = TripletLoss_dev2(margin).cuda()
        n_epochs = 1
        log_interval = 10
        load_model_path = None  # 'triplet_mg26.pt' # 'clf_triplet2_5.pt' # 'triplet_31.pt'

        model.fc = nn.Sequential(nn.Linear(1000, 2), nn.Dropout(p=0.5))
        if load_model_path is not None:
            model.load_state_dict(torch.load(load_model_path))

        # For classification
        dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size,
                                                   shuffle=True, **kwargs)
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size,
                                                  shuffle=False, **kwargs)

        # Model for plain classification
        newmodel = nn.Sequential(model.embedding_net, nn.Linear(1000, 2), nn.LogSoftmax(dim=1)).to(device)
        print(newmodel)
        newmodel.to(device)
        optimizer = optim.SGD(newmodel.parameters(), lr=0.01, momentum=0.9)
        # optimizer = optim.Adam(newmodel.parameters())
        for epoch in range(0):  # disabled; set a positive count to pre-train the classifier
            train(args, newmodel, device, train_loader, optimizer, epoch)
            j_loss, j_score = eval(args, newmodel, device, test_loader)

        # For embedding (triplet) training
        triplet_dataset_train = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train, batch_size=args.batch_size,
                                                           shuffle=True, **kwargs)
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test, batch_size=args.batch_size,
                                                          shuffle=False, **kwargs)

        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
        scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)

        from sklearn.pipeline import Pipeline
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
        from sklearn.model_selection import ShuffleSplit, cross_val_score
        lda = LinearDiscriminantAnalysis()

        Testmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)
        print(Testmodel)

        for temp in range(1, 30):  # save every 10 epochs (original note; a checkpoint is written per fit call)
            fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer,
                scheduler, n_epochs, cuda, log_interval)
            j_loss, j_score = eval(args, Testmodel, device, test_loader)
            torch.save(model.state_dict(), 'clf_' + str(temp) + '.pt')

        Testmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)
        print(Testmodel)
        j_loss, j_score = eval(args, Testmodel, device, test_loader)

        # Subject-labelled loaders for embedding extraction
        dataset_train_subj = GigaDataset(x=x_data, y=y_subj, valtype=valtype, istrain=True, sess=1)
        train_loader_subj = torch.utils.data.DataLoader(dataset_train_subj, batch_size=args.batch_size,
                                                        shuffle=True, **kwargs)
        dataset_test_subj = GigaDataset(x=x_data, y=y_subj, valtype=valtype, istrain=False, sess=2, subj=-1)
        test_loader_subj = torch.utils.data.DataLoader(dataset_test_subj, batch_size=args.batch_size,
                                                       shuffle=False, **kwargs)

        train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader_subj, model.embedding_net, 1000)
        val_embeddings_tl, val_labels_tl = extract_embeddings(test_loader_subj, model.embedding_net, 1000)
        train_labels_tl_subj = train_labels_tl - train_labels_tl % 2  # strip the class bit, keep the subject id

        # from torchvision import datasets, models, transforms
        # newmodel = torch.nn.Sequential(*(list(model.embedding_net.children())[:]))
        # for param in model.embedding_net.parameters():
        #     param.requires_grad = True

        # t-SNE visualization of the learned embeddings
        from sklearn.manifold import TSNE
        tsne = TSNE(n_components=2, perplexity=30)
        # features = np.concatenate([train_embeddings_tl, val_embeddings_tl])
        # val_labels_tl = val_labels_tl + 2
        # labels = np.concatenate([train_labels_tl, val_labels_tl])
        train_tsne = tsne.fit_transform(train_embeddings_tl)
        plot_embeddings(train_tsne, train_labels_tl % 2)
        val_tsne = tsne.fit_transform(val_embeddings_tl)
        plot_embeddings(val_tsne, val_labels_tl % 2)

        # Fine-tune the whole embedding with a classification head
        for param in model.embedding_net.parameters():
            param.requires_grad = True
        # embedding_net2 = EmbeddingDeep4CNN()
        newmodel = nn.Sequential(model.embedding_net,
                                 nn.Linear(1000, 2),
                                 nn.Dropout(p=0.5),
                                 nn.LogSoftmax(dim=1)).to(device)
        print(newmodel)
        # newmodel.fc_lr = nn.Linear(1000, 2)
        newmodel.to(device)
        optimizer = optim.SGD(newmodel.parameters(), lr=0.01, momentum=0.9)
        # optimizer = optim.Adam(newmodel.parameters())
        for epoch in range(1, 100):
            train(args, newmodel, device, train_loader, optimizer, epoch)
            j_loss, j_score = eval(args, newmodel, device, test_loader)
            if args.save_model:
                torch.save(newmodel.state_dict(), 'clf_83_8.pt')
        newmodel.load_state_dict(torch.load('clf_83_8.pt'))
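# `extract_embeddings(dataloader, model, k)` is used above to pull k-dimensional
# embeddings and their labels out of a loader. The project's own implementation lives
# elsewhere; the sketch below reproduces the behaviour the calls assume (an (N, k)
# embedding matrix plus an N-vector of labels) and mirrors the inline loop used for
# the same purpose later in this file. Illustrative only.
def extract_embeddings_sketch(dataloader, model, k):
    import numpy as np
    import torch
    model.eval()
    embeddings = np.zeros((len(dataloader.dataset), k))
    labels = np.zeros(len(dataloader.dataset))
    i = 0
    with torch.no_grad():
        for images, target in dataloader:
            if torch.cuda.is_available():
                images = images.cuda()
            embeddings[i:i + len(images)] = model(images).data.cpu().numpy()
            labels[i:i + len(images)] = target.numpy()
            i += len(images)
    return embeddings, labels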
def main():
    import argparse
    import os
    from datetime import datetime

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    from torch.utils.tensorboard import SummaryWriter  # assumed writer backend; tensorboardX also fits this usage

    from trainer import fit

    cuda = torch.cuda.is_available()

    # Training settings
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='for saving the current model')

    # Writer outputs to the ./runs/ directory by default
    writer = SummaryWriter()

    args = parser.parse_args()
    args.seed = 0
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    loging = False
    if args.save_model:
        model_save_path = 'model/DANet(Deep4Net100)_test/'
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)
    if loging:
        fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
        f = open(fname, 'w')

    x_data, y_data = load_smt()

    # Joint (subject, class) labels: 2*i + c for subject i, class c.
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)
    # nonbciilli = np.s_[0, 1, 2, 4, 5, 8, 16, 17, 18, 20, 21, 27, 28, 29, 30, 32, 35, 36, 38, 42, 43, 44, 51]

    valtype = 'sess'
    if valtype == 'loso':
        pass
    elif valtype == 'sess':
        from networks import EmbeddingDeep4CNN, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN
        from losses import TripletLoss_dev2

        # Build the model for metric learning
        margin = 1.0
        gamma = 0.7
        embedding_net = EmbeddingDeep4CNN()
        print(embedding_net)
        model = TripletNet(embedding_net)
        # model.fc = nn.Linear(embedding_net.num_hidden, 2)
        if cuda:
            model.cuda()
        loss_fn = TripletLoss_dev2(margin, gamma).cuda()
        n_epochs = 1
        log_interval = 10

        # load_model_path = model_save_path + 'dgnet1.027.pt'
        # other old checkpoints: 'C:\\Users\dk\PycharmProjects\giga_cnn\구모델\\clf_83_8.pt', 'clf_29.pt', 'triplet_mg26.pt', 'clf_triplet2_5.pt', 'triplet_31.pt'
        load_model_path = None
        if load_model_path is not None:
            model.load_state_dict(torch.load(load_model_path))

        # For classification data
        dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False)

        # For domain adaptation (triplet) data
        triplet_dataset_train = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train,
                                                           batch_size=args.batch_size, shuffle=True)
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test,
                                                          batch_size=args.batch_size, shuffle=False)

        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
        scheduler = lr_scheduler.StepLR(optimizer, 10, gamma=0.5, last_epoch=-1)

        # Model used for validation
        evalmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)
        print('____________DANet____________')
        print(model)

        # from torch.utils.tensorboard import SummaryWriter
        # writer = SummaryWriter()
        # images, labels = next(iter(train_loader))
        # import torchvision
        # grid = torchvision.utils.make_grid(images)
        # writer.add_images('images', images)
        # writer.add_embedding(metadata=train_embeddings_tl)
        # writer.add_embedding(metadata=val_embeddings_tl)
        # writer.close()

        for temp in range(1, 50):
            fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer,
                scheduler, n_epochs, cuda, log_interval)
            train_loss, j_score = eval(args, evalmodel, device, train_loader)
            eval_loss, j_score = eval(args, evalmodel, device, test_loader)
            writer.add_scalar('Train/Loss', np.mean(train_loss) / 100, temp)
            writer.add_scalar('Eval/Loss', np.mean(eval_loss) / 100, temp)
            writer.close()  # effectively a flush; the writer reopens on the next add_scalar
            torch.save(model.state_dict(),
                       model_save_path + 'dgnet_' + str(gamma) + '_' + str(temp) + '.pt')

        # For visualization: subject-labelled loaders
        dataset_train_subj = GigaDataset(x=x_data, y=y_subj, valtype='subj',
                                         istrain=True, sess=1, subj=np.r_[0:10])
        train_loader_subj = torch.utils.data.DataLoader(dataset_train_subj,
                                                        batch_size=args.batch_size, shuffle=True)
        dataset_test_subj = GigaDataset(x=x_data, y=y_subj, valtype='sess',
                                        istrain=False, sess=2, subj=-1)
        test_loader_subj = torch.utils.data.DataLoader(dataset_test_subj,
                                                       batch_size=args.batch_size, shuffle=False)

        # train_embeddings_tl, train_labels_tl = extract_features(train_loader_subj.dataset, model.embedding_net.convnet, 0, 100)
        train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader_subj, model.embedding_net,
                                                                  model.embedding_net.num_hidden)
        val_embeddings_tl, val_labels_tl = extract_embeddings(test_loader_subj, model.embedding_net,
                                                              model.embedding_net.num_hidden)

        # t-SNE visualization of the learned embeddings
        from sklearn.manifold import TSNE
        tsne = TSNE(n_components=2, perplexity=30)
        # features = np.concatenate([train_embeddings_tl, val_embeddings_tl])
        # labels = np.concatenate([train_labels_tl, val_labels_tl])
        train_tsne = tsne.fit_transform(train_embeddings_tl)
        plot_features(train_tsne, train_labels_tl)      # coloured by (subject, class)
        plot_features(train_tsne, train_labels_tl % 2)  # coloured by class only
        for i in range(0, 10):                          # per-subject plots
            plot_features(train_tsne[i * 200:(i + 1) * 200], train_labels_tl[i * 200:(i + 1) * 200])
        plot_features3d(train_tsne, train_labels_tl)

        val_tsne = tsne.fit_transform(val_embeddings_tl)
        plot_features(val_tsne, val_labels_tl - 108)
        plot_features(val_tsne, val_labels_tl % 2)
        plot_features3d(val_tsne, val_labels_tl % 2)
        plot_embeddings(val_tsne, (val_labels_tl - 108) - (val_labels_tl - 108) % 2)

        # Fine-tune the embedding with a classification head
        for param in model.embedding_net.parameters():
            param.requires_grad = True
        # embedding_net2 = EmbeddingDeep4CNN()
        newmodel = nn.Sequential(model.embedding_net,
                                 nn.Linear(1000, 2),
                                 nn.Dropout(p=0.5),
                                 nn.LogSoftmax(dim=1)).to(device)
        print(newmodel)
        # newmodel.fc_lr = nn.Linear(1000, 2)
        newmodel.to(device)
        optimizer = optim.SGD(newmodel.parameters(), lr=0.001, momentum=0.9)
        # optimizer = optim.Adam(newmodel.parameters())
        for epoch in range(1, 100):
            train(args, newmodel, device, train_loader, optimizer, epoch)
            j_loss, j_score = eval(args, newmodel, device, test_loader)
            if args.save_model:
                torch.save(newmodel.state_dict(), 'clf_83_8.pt')
        newmodel.load_state_dict(torch.load('clf_83_8.pt'))  # originally torch.load(load_model_path), which is None at this point
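# `plot_features` / `plot_embeddings` / `plot_features3d` are the project's scatter
# helpers for the t-SNE outputs above (colouring points by class, subject, or both).
# A minimal matplotlib stand-in for the 2-D case, under the assumption that `points`
# is an (N, 2) array and `labels` an N-vector of integer-like values (hypothetical
# helper, for illustration only):
def plot_features_sketch(points, labels):
    import numpy as np
    import matplotlib.pyplot as plt
    plt.figure(figsize=(8, 6))
    for lab in np.unique(labels):
        mask = labels == lab  # one colour per label value
        plt.scatter(points[mask, 0], points[mask, 1], s=8, alpha=0.6, label=str(int(lab)))
    plt.legend(markerscale=2, fontsize='small', ncol=2)
    plt.show()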
def main():
    import argparse
    import os
    from datetime import datetime

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    import matplotlib.pyplot as plt

    from trainer import fit

    cuda = torch.cuda.is_available()

    # Training settings (argparse template inherited from the PyTorch MNIST example)
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='for saving the current model')

    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    loging = False
    ismultitask = False
    loso = False

    if args.save_model:
        model_save_path = 'model/triplet/'
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)
    if loging:
        fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
        f = open(fname, 'w')

    x_data, y_data = load_smt()

    # Joint (subject, class) labels: 2*i + c for subject i, class c.
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)
    # nonbciilli = np.s_[0, 1, 2, 4, 5, 8, 16, 17, 18, 20, 21, 27, 28, 29, 30, 32, 35, 36, 38, 42, 43, 44, 51]

    valtype = 'sess'
    if valtype == 'loso':
        pass
    elif valtype == 'sess':
        from networks import EmbeddingDeep4CNN, TripletNet, FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN
        from losses import TripletLoss_dev2

        # Build the model for metric learning
        margin = 1
        embedding_net = EmbeddingShallowCNN()
        # clf_net = nn.Sequential(EmbeddingDeep4CNN(), nn.Linear(1000, 2), nn.Dropout(p=1), nn.LogSoftmax(dim=1))
        print(embedding_net)
        model = TripletNet(embedding_net)
        if cuda:
            model.cuda()
        loss_fn = TripletLoss_dev2(margin).cuda()
        n_epochs = 1
        log_interval = 10
        load_model_path = None  # 'C:\\Users\dk\PycharmProjects\giga_cnn\구모델\\clf_83_8.pt' # 'clf_29.pt' # 'triplet_mg26.pt' # 'clf_triplet2_5.pt' # 'triplet_31.pt'

        model.fc = nn.Sequential(nn.Linear(1000, 2), nn.Dropout(p=0.5))
        if load_model_path is not None:
            embedding_net.load_state_dict(torch.load(load_model_path))

        # For classification
        dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size,
                                                   shuffle=True, **kwargs)
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size,
                                                  shuffle=False, **kwargs)

        # Model for plain classification
        newmodel = nn.Sequential(model.embedding_net,
                                 nn.Linear(1000, 2),
                                 nn.Dropout(p=0.5),
                                 nn.LogSoftmax(dim=1)).to(device)
        print(newmodel)
        newmodel.to(device)
        optimizer = optim.SGD(newmodel.parameters(), lr=0.001, momentum=0.9)
        # optimizer = optim.Adam(newmodel.parameters())
        for epoch in range(0):  # disabled; set a positive count to pre-train the classifier
            train(args, newmodel, device, train_loader, optimizer, epoch)
            j_loss, j_score = eval(args, newmodel, device, test_loader)

        # For embedding (triplet) training
        triplet_dataset_train = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=True, sess=1)
        triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train, batch_size=args.batch_size,
                                                           shuffle=True, **kwargs)
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype, istrain=False, sess=2, subj=-1)
        triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test, batch_size=args.batch_size,
                                                          shuffle=False, **kwargs)

        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
        scheduler = lr_scheduler.StepLR(optimizer, 8, gamma=1, last_epoch=-1)

        from sklearn.pipeline import Pipeline
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
        from sklearn.model_selection import ShuffleSplit, cross_val_score
        lda = LinearDiscriminantAnalysis()

        Testmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)
        # tempEmbeddingNet = nn.Sequential(model.embedding_net,
        #                                  nn.Linear(1000, 1000),
        #                                  nn.Sigmoid())
        # model = TripletNet(embedding_net)
        print(model)

        for temp in range(1, 30):  # save every 10 epochs (original note; a checkpoint is written per fit call)
            fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer,
                scheduler, n_epochs, cuda, log_interval)
            j_loss, j_score = eval(args, Testmodel, device, train_loader)
            j_loss, j_score = eval(args, Testmodel, device, test_loader)
            torch.save(model.state_dict(), 'clf_' + str(temp) + '.pt')

        torch.save(model.state_dict(), 'shallowDG_150epoch_82acc' + str(temp) + '.pt')

        # For visualization: subject-labelled loaders
        dataset_train_subj = GigaDataset(x=x_data, y=y_subj, valtype=valtype, istrain=True, sess=1)
        train_loader_subj = torch.utils.data.DataLoader(dataset_train_subj, batch_size=args.batch_size,
                                                        shuffle=True, **kwargs)
        dataset_test_subj = GigaDataset(x=x_data, y=y_subj, valtype=valtype, istrain=False, sess=2, subj=-1)
        test_loader_subj = torch.utils.data.DataLoader(dataset_test_subj, batch_size=args.batch_size,
                                                       shuffle=False, **kwargs)

        # train_embeddings_tl, train_labels_tl = extract_features(train_loader_subj.dataset, model.embedding_net.convnet, 0, 100)
        train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader_subj, model.embedding_net, 1000)
        val_embeddings_tl, val_labels_tl = extract_embeddings(test_loader_subj, model.embedding_net, 1000)
        # train_embeddings_tl, train_labels_tl = extract_embeddings(train_loader_subj, model.embedding_net.convnet[0], 1000)

        # t-SNE visualization of the learned embeddings
        from sklearn.manifold import TSNE
        tsne = TSNE(n_components=2, perplexity=30)
        # features = np.concatenate([train_embeddings_tl, val_embeddings_tl])
        # labels = np.concatenate([train_labels_tl, val_labels_tl])
        train_tsne = tsne.fit_transform(train_embeddings_tl[0:2000])
        # plot_embeddings(train_tsne, train_labels_tl[0:1000])
        plot_features(train_tsne, train_labels_tl[0:2000])
        plot_features3d(train_tsne, train_labels_tl[0:2000] % 2)  # slice matched to train_tsne (originally [0:1000])
        val_tsne = tsne.fit_transform(val_embeddings_tl)
        plot_embeddings(val_tsne, (val_labels_tl - 108) - (val_labels_tl - 108) % 2)

        # Fine-tune the embedding with a classification head
        for param in model.embedding_net.parameters():
            param.requires_grad = True
        # embedding_net2 = EmbeddingDeep4CNN()
        newmodel = nn.Sequential(model.embedding_net,
                                 nn.Linear(1000, 2),
                                 nn.Dropout(p=0.5),
                                 nn.LogSoftmax(dim=1)).to(device)
        print(newmodel)
        # newmodel.fc_lr = nn.Linear(1000, 2)
        newmodel.to(device)
        optimizer = optim.SGD(newmodel.parameters(), lr=0.001, momentum=0.9)
        # optimizer = optim.Adam(newmodel.parameters())
        for epoch in range(1, 100):
            train(args, newmodel, device, train_loader, optimizer, epoch)
            j_loss, j_score = eval(args, newmodel, device, test_loader)
            if args.save_model:
                torch.save(newmodel.state_dict(), 'clf_83_8.pt')
        newmodel.load_state_dict(torch.load('clf_83_8.pt'))  # originally torch.load(load_model_path), which is None at this point

        # ---- Visualize feature maps (exploratory scratch code) ----
        activation = {}

        def get_activation(name):
            def hook(model, input, output):
                activation[name] = output.detach()
            return hook

        # Register a hook on the first conv layer, run data through it, then inspect.
        handle = model.embedding_net.convnet[0].register_forward_hook(get_activation('conv1'))

        with torch.no_grad():
            model.eval()
            num_ftrs = model.embedding_net.fc.out_features  # embedding dimensionality
            embeddings = np.zeros((len(train_loader.dataset), num_ftrs))
            labels = np.zeros(len(train_loader.dataset))
            k = 0
            for images, target in train_loader:
                if cuda:
                    images = images.cuda()
                embeddings[k:k + len(images)] = model.get_embedding(images).data.cpu().numpy()
                labels[k:k + len(images)] = target.numpy()
                k += len(images)

        # Plot channel-wise activations from the last hooked batch
        # (assumes the hooked output squeezes to (channels, H, W)).
        act = activation['conv1'].squeeze().cpu()
        fig, axarr = plt.subplots(act.size(0))
        for idx in range(act.size(0)):
            axarr[idx].imshow(act[idx])
        handle.remove()
        model.embedding_net.convnet[0]._forward_hooks.clear()

        # SaveFeatures is a hook-based feature saver defined elsewhere in the project;
        # a forward pass must run before its .features attribute is populated.
        features = SaveFeatures(model.embedding_net.convnet[0])
        for images, target in train_loader:
            if cuda:
                images = images.cuda()
            output = model.embedding_net(images)
        temp = features.features.data.cpu().numpy()
        del features.features
        torch.cuda.empty_cache()

        def printnorm(self, input, output):
            # input is a tuple of packed inputs; output is a Tensor whose .data we inspect
            print('Inside ' + self.__class__.__name__ + ' forward')
            print('')
            print('input: ', type(input))
            print('input[0]: ', type(input[0]))
            print('output: ', type(output))
            print('')
            print('input size:', input[0].size())
            print('output size:', output.data.size())
            print('output norm:', output.data.norm())
            return output.data

        model.embedding_net.convnet[0].register_forward_hook(printnorm)
        out = model.get_embedding(images)  # originally model(input); TripletNet's forward expects a triplet

        fig, axarr = plt.subplots(10)
        for idx in range(10):
            axarr[idx].imshow(temp[idx, 1, :, :])
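# `SaveFeatures` is used above but never defined in this file. The usual forward-hook
# feature saver that the call pattern implies (construct with a module, read
# `.features` after a forward pass, then `.remove()`) is sketched here; the project's
# own class may differ in detail.
class SaveFeaturesSketch:
    def __init__(self, module):
        # capture the module's output on every forward pass
        self.hook = module.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        self.features = output.detach()

    def remove(self):
        self.hook.remove()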
def main():
    import argparse
    import os
    from datetime import datetime

    import numpy as np
    import torch
    import torch.nn as nn
    import torch.optim as optim
    from torch.optim import lr_scheduler
    from torch.utils.tensorboard import SummaryWriter  # assumed writer backend; tensorboardX also fits this usage

    from trainer import fit

    cuda = torch.cuda.is_available()

    # Training settings
    parser = argparse.ArgumentParser(description='cross subject domain adaptation')
    parser.add_argument('--batch-size', type=int, default=100, metavar='N',
                        help='input batch size for training (default: 100)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100, metavar='N',
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='for saving the current model')

    # Experiment configuration; the SummaryWriter outputs to ./runs/ by default.
    fold_idx = 5
    gamma = 1
    DAsetting = False

    args = parser.parse_args()
    args.seed = 0
    args.use_tensorboard = True

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    np.random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    device = torch.device("cuda" if use_cuda else "cpu")
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    loging = False

    x_data, y_data = load_smt()

    # Joint (subject, class) labels: 2*i + c for subject i, class c.
    y_subj = np.zeros([108, 200])
    for i in range(108):
        y_subj[i, :] = i * 2
    y_subj = y_data.reshape(108, 200) + y_subj
    y_subj = y_subj.reshape(21600)

    # For classification data
    valtype = 'subj'

    # subjects 0-27 train
    # train_subj1 = np.r_[0:27]
    # train_subj2 = np.r_[0:27] + 54
    # test_subj = np.r_[27:54, 54 + 27:108]

    if DAsetting:  # domain adaptation setting
        train_subj1 = np.r_[27:54]
        train_subj2 = np.r_[27:54] + 54
        test_subj = np.r_[0:27, 54 + 0:54 + 27]
        trial_s = (0, 200)
        trial_t = (0, 200)
        trial_val = (0, 200)

        dataset_train1 = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                     istrain=True, subj=train_subj1, trial=trial_s)
        dataset_train2 = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                     istrain=True, subj=train_subj2, trial=trial_t)
        dataset_train = dataset_train1 + dataset_train2
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                   istrain=False, subj=test_subj, trial=trial_val)

        triplet_dataset_train1 = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                             istrain=True, subj=train_subj1, trial=trial_s)
        triplet_dataset_train2 = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                             istrain=True, subj=train_subj2, trial=trial_t)
        triplet_dataset_train = triplet_dataset_train1 + triplet_dataset_train2
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                           istrain=False, subj=test_subj, trial=trial_val)
    else:  # domain generalization setting: hold out 9 subjects per session as the test fold
        # Note: the second slice originally read np.r_[fold_idx + 54:fold_idx + 9 + 54],
        # dropping the *9 stride; corrected here to mirror the first-session fold.
        test_subj = np.r_[fold_idx * 9:fold_idx * 9 + 9,
                          fold_idx * 9 + 54:fold_idx * 9 + 9 + 54]
        train_subj = np.setxor1d(np.r_[0:108], test_subj)
        trial_train = (0, 200)
        trial_val = (0, 200)

        dataset_train = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                    istrain=True, subj=train_subj, trial=trial_train)
        dataset_test = GigaDataset(x=x_data, y=y_data, valtype=valtype,
                                   istrain=False, subj=test_subj, trial=trial_val)
        triplet_dataset_train = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                            istrain=True, subj=train_subj, trial=trial_train)
        triplet_dataset_test = TripletGiga(x=x_data, y=y_data, valtype=valtype,
                                           istrain=False, subj=test_subj, trial=trial_val)

    train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=args.batch_size, shuffle=False)
    triplet_train_loader = torch.utils.data.DataLoader(triplet_dataset_train, batch_size=args.batch_size, shuffle=True)
    triplet_test_loader = torch.utils.data.DataLoader(triplet_dataset_test, batch_size=args.batch_size, shuffle=False)

    ###################################################################################################################
    # Build the model for metric learning
    from networks import Deep4Net, EmbeddingDeep4CNN, EmbeddingDeep4CNN_bn, TripletNet, \
        FineShallowCNN, EmbeddingDeepCNN, QuintupletNet, EmbeddingShallowCNN
    from losses import TripletLoss_dev2

    margin = 1.0
    embedding_net = Deep4Net()
    print(embedding_net)
    model = TripletNet(embedding_net)
    if cuda:
        model.cuda()
    loss_fn = TripletLoss_dev2(margin, gamma).cuda()
    n_epochs = 1
    log_interval = 10

    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    # scheduler = lr_scheduler.StepLR(optimizer, 5, gamma=0.5, last_epoch=-1)
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    scheduler = lr_scheduler.StepLR(optimizer, 10, gamma=1, last_epoch=-1)

    # Model used for validation: embedding + fc head + log-softmax
    evalmodel = nn.Sequential(model.embedding_net, model.fc, nn.LogSoftmax(dim=1)).to(device)
    print('____________DANet____________')
    print(model)

    comment = 'fold_' + str(fold_idx) + '_g_' + str(gamma)

    # Saving / logging setup
    if args.save_model:
        model_save_path = 'model/Deep4Net_(test0604)/' + comment + '/'
        if not os.path.isdir(model_save_path):
            os.makedirs(model_save_path)
    if loging:
        fname = model_save_path + datetime.today().strftime("%m_%d_%H_%M") + ".txt"
        f = open(fname, 'w')
    if args.use_tensorboard:
        writer = SummaryWriter(comment=comment)

    # load_model_path = model_save_path + 'danet_0.7_49.pt'
    # other old checkpoints: 'C:\\Users\dk\PycharmProjects\giga_cnn\구모델\\clf_83_8.pt', 'clf_29.pt', 'triplet_mg26.pt', 'clf_triplet2_5.pt', 'triplet_31.pt'
    load_model_path = None
    if load_model_path is not None:
        model.load_state_dict(torch.load(load_model_path))

    for temp in range(1, 50):
        fit(triplet_train_loader, triplet_test_loader, model, loss_fn, optimizer,
            scheduler, n_epochs, cuda, log_interval)
        print(temp)
        train_loss, train_score = eval(args, evalmodel, device, train_loader)
        eval_loss, eval_score = eval(args, evalmodel, device, test_loader)
        if args.use_tensorboard:
            writer.add_scalar('Train/Loss', np.mean(train_loss) / 100, temp)
            writer.add_scalar('Train/Acc', np.mean(train_score) / 100, temp)
            writer.add_scalar('Eval/Loss', np.mean(eval_loss) / 100, temp)
            writer.add_scalar('Eval/Acc', np.mean(eval_score) / 100, temp)
            writer.close()  # effectively a per-epoch flush; the writer reopens on the next add_scalar
        torch.save(model.state_dict(),
                   model_save_path + 'danet_' + str(gamma) + '_' + str(temp) + '.pt')
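# Note: this file defines main() several times, so only the last definition survives
# when the module is imported or executed. A standard entry point, assuming the final
# variant is the one meant to run:
if __name__ == '__main__':
    main()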