def main(): """ Main function to spawn the train and test process. """ args = parse_args() cfg = load_config(args) print('=' * 20) # print(cfg) print('Num of GPUs: ', cfg.NUM_GPUS) print(cfg.TRAIN) print(cfg.TEST) print('output dir is: ', cfg.OUTPUT_DIR) # Perform training. if cfg.TRAIN.ENABLE: print("begin to trian the model... ") if cfg.NUM_GPUS > 1: print('gpu is over 1') torch.multiprocessing.spawn( mpu.run, nprocs=cfg.NUM_GPUS, args=( cfg.NUM_GPUS, train, args.init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg, ), daemon=False, ) else: train(cfg=cfg) # Perform multi-clip testing. if cfg.TEST.ENABLE: print("begin to test the model... ") if cfg.NUM_GPUS > 1: torch.multiprocessing.spawn( mpu.run, nprocs=cfg.NUM_GPUS, args=( cfg.NUM_GPUS, test, args.init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg, ), daemon=False, ) else: test(cfg=cfg)
def main(): """ Main function to spawn the train and test process. """ args = parse_args() cfg = load_config(args) # Perform training. print("Number of GPUS: ", cfg.NUM_GPUS) if cfg.TRAIN.ENABLE: if cfg.NUM_GPUS > 1: torch.multiprocessing.spawn( mpu.run, nprocs=cfg.NUM_GPUS, args=( cfg.NUM_GPUS, train, args.init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg, ), daemon=False, ) else: train(cfg=cfg) # Perform multi-clip testing. if cfg.TEST.ENABLE: if cfg.NUM_GPUS > 1: torch.multiprocessing.spawn( mpu.run, nprocs=cfg.NUM_GPUS, args=( cfg.NUM_GPUS, test, args.init_method, cfg.SHARD_ID, cfg.NUM_SHARDS, cfg.DIST_BACKEND, cfg, ), daemon=False, ) else: test(cfg=cfg)
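# NOTE: a minimal sketch (an assumption for illustration, not the actual
# mpu.run) of the worker entry point that torch.multiprocessing.spawn
# expects above. spawn calls fn(local_rank, *args), so the signature must
# match the `args` tuple passed in main().
import torch
import torch.distributed as dist

def run_sketch(local_rank, num_proc, func, init_method, shard_id, num_shards, backend, cfg):
    # Global world size and rank across all shards (machines).
    world_size = num_proc * num_shards
    rank = shard_id * num_proc + local_rank
    dist.init_process_group(backend=backend, init_method=init_method,
                            world_size=world_size, rank=rank)
    # Bind this worker to its GPU before running the train/test function.
    torch.cuda.set_device(local_rank)
    func(cfg=cfg)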
# Squared hinge loss.
loss = T.mean(T.sqr(T.maximum(0., 1. - target * train_output)))

params = lasagne.layers.get_all_params(cnn, trainable=True)
updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)

# Deterministic (test-time) output, loss, and error rate.
test_output = lasagne.layers.get_output(cnn, deterministic=True)
test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)),
                  dtype=theano.config.floatX)

# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function([input, target, LR], loss, updates=updates)

# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input, target], [test_loss, test_err])

print('Training...')

train_net.train(
    train_fn, val_fn, cnn,
    batch_size,
    LR_start, LR_decay,
    num_epochs,
    train_set.X, train_set.y,
    valid_set.X, valid_set.y,
    test_set.X, test_set.y,
    shuffle_parts=shuffle_parts)
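# The snippet above assumes the Theano symbolic variables and the
# training-mode network output were declared earlier. A plausible minimal
# sketch of those declarations (a reconstruction, not the original code;
# the tensor shapes are an assumption):
import theano
import theano.tensor as T
import lasagne

input = T.tensor4('inputs')   # mini-batch of images; shadows the builtin, as used above
target = T.matrix('targets')  # targets encoded in {-1, +1} for the hinge loss
LR = T.scalar('LR', dtype=theano.config.floatX)  # learning rate fed each step
# Stochastic (training-mode) output of the network `cnn` built earlier:
train_output = lasagne.layers.get_output(cnn, deterministic=False)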
# Load your own CSV (or other format) data here.
train_file = os.path.join(current_path, 'xxx.csv')
# Load the feature npy into memory if possible (for faster feature reading).
feature_npy = 'xxx.npy'

# Data loading.
train_pair = data_process.get_data_pair(train_file)
# Pre-load the features into memory.
c3d_feature = data_process.load_feature(feature_npy)

pair = {'train': train_pair}
show_datasets = {x: c3d_datasets(pair[x], c3d_feature, x) for x in ['train']}
show_data_sizes = {x: len(show_datasets[x]) for x in ['train']}
dataloaders = {x: DataLoader(show_datasets[x], batch_size=256, shuffle=True, num_workers=4)
               for x in ['train']}

use_gpu = torch.cuda.is_available()

# Initialize the network, the loss, and the optimization strategy.
model = embedding_net()
net = triplet_net(model)
net = net.cuda()
criterion = torch.nn.MarginRankingLoss(margin=0.5)
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

train(dataloaders, net, criterion, optimizer, exp_lr_scheduler,
      show_data_sizes, use_gpu, num_epoches=100)
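# A minimal sketch of what `triplet_net` might look like, assuming the usual
# triplet pattern paired with MarginRankingLoss: the wrapper embeds an
# anchor/positive/negative triple and returns the two distances. This is an
# assumption for illustration, not the actual class used above.
import torch
import torch.nn.functional as F

class TripletNetSketch(torch.nn.Module):
    def __init__(self, embedding_net):
        super().__init__()
        self.embedding_net = embedding_net

    def forward(self, anchor, positive, negative):
        e_a = self.embedding_net(anchor)
        e_p = self.embedding_net(positive)
        e_n = self.embedding_net(negative)
        # MarginRankingLoss(dist_neg, dist_pos, target=1) then pushes
        # dist_neg to exceed dist_pos by at least the margin.
        dist_pos = F.pairwise_distance(e_a, e_p)
        dist_neg = F.pairwise_distance(e_a, e_n)
        return dist_pos, dist_neg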
def main():
    # Hardware device used for training.
    device = torch.device('cuda')
    # Select the network model.
    # net = EEGNet1_3().to(device)
    net = EEGNet2018().to(device)

    # Global step counter, used for plotting the training curve.
    global_step = 0
    # Initialize the network weights.
    net = train_net.weights_init(net)

    # Whether to visualize: 1 or 0.
    vis = 1
    if vis == 1:
        viz = Visdom()
        viz.line([0.], [0.], win='train_loss', update='append', opts={'title': 'train_loss'})
        # viz.line([0.], [0.], win='test_results', update='append', opts={'title': 'test_results'})

    # %%
    """loaddata"""
    filename_train = r"G:\EEGNet\data\EEGDataset\ERP\TrainDatas\Circle\dataset_A_train.mat"
    filename_label = r"G:\EEGNet\data\EEGDataset\ERP\TrainDatas\Circle\dataset_A_label.mat"
    filename_test = r"G:\EEGNet\data\EEGDataset\ERP\TestDatas\Circle\dataset_A_test.mat"
    filename_test_label = r"G:\EEGNet\data\EEGDataset\ERP\TestDatas\Circle\dataset_A_label.mat"

    # %% Load the five train/validation splits from the .mat files.
    train_mat = scipy.io.loadmat(filename_train)
    label_mat = scipy.io.loadmat(filename_label)
    dataset_A_train, targets_A_train = {}, {}
    dataset_A_hold, targets_A_hold = {}, {}
    for i in range(5):
        dataset_A_train[i] = train_mat['Tdata%d' % (i + 1)]
        targets_A_train[i] = label_mat['Tlabel%d' % (i + 1)]
        dataset_A_hold[i] = train_mat['Vdata%d' % (i + 1)]
        targets_A_hold[i] = label_mat['Vlabel%d' % (i + 1)]

    # %% Test data, reshaped to (N, 1, channels, samples).
    dataset_test = scipy.io.loadmat(filename_test)['data']
    targets_test = scipy.io.loadmat(filename_test_label)['labels']
    X_test = np.reshape(dataset_test,
                        [dataset_test.shape[0], 1,
                         dataset_test.shape[1], dataset_test.shape[2]]).astype('float32')
    y_test = np.reshape(targets_test, [targets_test.shape[0], 1]).astype('float32')

    for epoch1 in range(5):  # loop over the five train/validation splits
        X_train = np.reshape(dataset_A_train[epoch1],
                             [dataset_A_train[epoch1].shape[0], 1,
                              dataset_A_train[epoch1].shape[1],
                              dataset_A_train[epoch1].shape[2]]).astype('float32')
        y_train = np.reshape(targets_A_train[epoch1],
                             [targets_A_train[epoch1].shape[0], 1]).astype('float32')
        X_val = np.reshape(dataset_A_hold[epoch1],
                           [dataset_A_hold[epoch1].shape[0], 1,
                            dataset_A_hold[epoch1].shape[1],
                            dataset_A_hold[epoch1].shape[2]]).astype('float32')
        y_val = np.reshape(targets_A_hold[epoch1],
                           [targets_A_hold[epoch1].shape[0], 1]).astype('float32')

        for epoch2 in range(net.epoch):
            print("\nEpoch: ", epoch2, epoch1)
            print("\nglobal_step: ", global_step)
            # Train the network: returns the loss value, the global step
            # (used for plotting the curve), and the updated network.
            loss, global_step, net = train_net.train(net, X_train, y_train,
                                                     net.batchSize, net.learnRate,
                                                     global_step, vis)
            if vis == 1:
                viz.line([loss.item()], [global_step], win='train_loss',
                         update='append', opts={'title': 'train_loss'})

            # Validation and test: return the predictions; valid_results holds
            # all the metrics, and test_loss is used for the test curve.
            prediction, valid_results, valid_loss = train_net.valid(net, X_val, y_val)
            pred, test_results, test_loss = train_net.valid(net, X_test, y_test)
            if vis == 1:
                viz.line([test_results], [global_step], win='test_results',
                         update='append', opts={'title': 'test_results'})
            print('Parameters: ["acc", "auc", "recall", "precision", "fmeasure"]')
            print('validation_results', valid_results)
            print('test_results', test_results)
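# For reference, a minimal sketch of how the five reported metrics could be
# computed with scikit-learn. The actual train_net.valid implementation is
# not shown above, so this helper and its threshold are assumptions.
from sklearn import metrics

def eval_metrics_sketch(y_true, y_score, threshold=0.5):
    # Binarize the network's scores at the given threshold.
    y_pred = (y_score >= threshold).astype('int')
    acc = metrics.accuracy_score(y_true, y_pred)
    auc = metrics.roc_auc_score(y_true, y_score)
    recall = metrics.recall_score(y_true, y_pred)
    precision = metrics.precision_score(y_true, y_pred)
    fmeasure = metrics.f1_score(y_true, y_pred)
    return [acc, auc, recall, precision, fmeasure]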