import copy
import time

import visitor  # module providing wrap_function, as used below
from utils import TimeExceeded, blockPrint, enablePrint


def try_wrapped(tree, args, func_name="test_me"):
    # Wrap a deep copy so the caller's AST is left untouched
    # (the original code deep-copied the tree but never used the copy).
    copy_tree = copy.deepcopy(tree)
    exec_tree = visitor.wrap_function(copy_tree, args, func_name)

    trace = []
    code = compile(exec_tree, filename='<blah>', mode='exec')
    namespace = {}
    exec(code, namespace)

    # Silence any prints emitted by the wrapped code while tracing.
    blockPrint()
    start = time.time()
    namespace['wrapper'](trace)
    end = time.time()
    enablePrint()

    # Note: this check only runs after the call returns, so it flags
    # slow runs rather than interrupting them.
    if end - start > 10:
        raise TimeExceeded
    return trace
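# Hypothetical usage (not from the source; the visitor.wrap_function
# contract -- producing a module that defines a 'wrapper' callable --
# is assumed): parse a function's source into an AST and collect its
# execution trace.
import ast

source = """
def test_me(x):
    return x * 2
"""
tree = ast.parse(source)
# The args format here is an assumption; pass whatever wrap_function expects.
trace = try_wrapped(tree, args=[21], func_name="test_me")
print(trace)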
import os
import random
import time

import numpy as np
import pandas as pd
import torch

import models
import utils
from logger import eegdg_logger  # assumed location of eegdg_logger


def exp(args, fold_idx, train_set, valid_set, test_set):
    path = args.save_root + args.result_dir
    if not os.path.isdir(path):
        os.makedirs(path)
        os.makedirs(path + '/models')
        os.makedirs(path + '/logs')
    logger = eegdg_logger(path + f'/logs/{fold_idx}')

    with open(path + '/args.txt', 'w') as f:
        f.write(str(args))

    cuda = torch.cuda.is_available()  # use the GPU if one is available
    device = 'cuda' if cuda else 'cpu'
    if cuda:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    # Seed every RNG for reproducibility.
    seed = args.seed
    random.seed(seed)
    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=args.batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, shuffle=False)

    model = models.get_model(args)
    # model = FcClfNet(embedding_net)
    # model = torch.nn.DataParallel(model)

    mb_params = utils.param_size(model)
    print(f"Model size = {mb_params:.4f} MB")
    if cuda:
        model.cuda(device=device)
    print(model)

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=1e-4, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs - 1)

    results_columns = ['valid_loss', 'test_loss', 'valid_accuracy', 'test_accuracy']
    df = pd.DataFrame(columns=results_columns)

    valid_acc = 0
    valid_min_loss = float('inf')  # was an arbitrary 100; inf is always beaten by the first loss
    best_acc = 0
    best_acc_loss = 0
    max_acc = 0
    best_epoch = best_loss_epoch = max_epoch = 0  # avoid UnboundLocalError before first improvement
    n_epochs_stop = 200
    epochs_no_improve = 0
    early_stop = False

    for epochidx in range(1, args.epochs):
        print(epochidx)
        start = time.time()
        train(10, model, device, train_loader, optimizer, scheduler, cuda, args.gpuidx)
        print(f'total time: {time.time() - start}')

        utils.blockPrint()
        train_loss, train_score = eval(model, device, train_loader)
        valid_loss, valid_score = eval(model, device, valid_loader)
        test_loss, test_score = eval(model, device, test_loader)
        utils.enablePrint()

        scheduler.step()
        lr = scheduler.get_last_lr()[0]
        print(f'LR : {lr}')
        logger.log_training(train_loss, train_score, test_loss, test_score, lr, epochidx)

        results = {
            'valid_loss': valid_loss,
            'test_loss': test_loss,
            'valid_accuracy': valid_score,
            'test_accuracy': test_score
        }
        # DataFrame.append was removed in pandas 2.0; use concat instead.
        df = pd.concat([df, pd.DataFrame([results])], ignore_index=True)
        print(results)

        if valid_score > valid_acc:
            valid_acc = valid_score
            best_acc = test_score
            torch.save(model.state_dict(),
                       os.path.join(path, 'models', f"model_fold{fold_idx}_best.pt"))
            best_epoch = epochidx

        if valid_loss < valid_min_loss:  # the model improved
            valid_min_loss = valid_loss
            best_acc_loss = test_score
            torch.save(model.state_dict(),
                       os.path.join(path, 'models', f"model_fold{fold_idx}_best(loss).pt"))
            best_loss_epoch = epochidx
            epochs_no_improve = 0
        else:
            epochs_no_improve += 1

        if test_score > max_acc:
            max_acc = test_score
            torch.save(model.state_dict(),
                       os.path.join(path, 'models', f"model_fold{fold_idx}_max.pt"))
            max_epoch = epochidx

        print(f'current best acc : {best_acc:.4f} at epoch {best_epoch}')
        print(f'current best(loss) acc : {best_acc_loss:.4f} at epoch {best_loss_epoch}')
        print(f'current max acc : {max_acc:.4f} at epoch {max_epoch}')

        if epochidx > 5 and epochs_no_improve == n_epochs_stop:
            print('Early stopping!')
            early_stop = True
            break

    if early_stop:
        print("Stopped")

    best_model = models.get_model(args)
    best_model.load_state_dict(
        torch.load(os.path.join(path, 'models', f"model_fold{fold_idx}_best.pt"),
                   map_location=device))
    if cuda:
        best_model.cuda(device=device)
    print("best accuracy")
    _, _ = eval(best_model, device, test_loader)
    # Note: this overwrites the per-epoch frame with the test-set summary.
    df = utils.get_testset_accuracy(best_model, device, test_set, args)
    return df
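# A minimal sketch of utils.param_size as called above (assumed, not
# from the source): total parameter count converted to megabytes,
# assuming 4-byte float32 weights.
def param_size(model):
    n_params = sum(p.numel() for p in model.parameters())
    return n_params * 4 / (1024 ** 2)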
for filename in sorted(files):
    if '.wav' in filename:
        print()
        print('Extracting Features of %s ...' % filename)
        filepath = rootPath + filename
        samplingRate, audioData = scipy.io.wavfile.read(filepath)
        if srDivider > 1:
            samplingRate /= srDivider
            audioData = audioData[::srDivider]
            gc.collect()
        audioData = np.swapaxes(audioData, 0, 1)  # (samples, channels) -> (channels, samples)
        spectos = []
        imfFeatures = []
        for eachChannel in audioData:
            utils.blockPrint()  # block unwanted prints from the EMD implementation
            # detail params: threshold_1=0.000001, threshold_2=0.00001
            decomposer = pyhht.emd.EMD(eachChannel, n_imfs=numberOfImfs)
            decomposedSignals = decomposer.decompose()
            utils.enablePrint()
            imfs = decomposedSignals[:-1]  # the last element is the residue
            # print(np.shape(imfs))

            # Calculate magnitudes from the IMFs (before normalization)
            mags = []
            instfs = []
            phases = []
            for imf in imfs:
                hx = sp.hilbert(imf)  # magnitude
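# Hypothetical continuation (the original snippet is cut off right after
# the Hilbert transform): the standard Hilbert-spectral features that
# would fill the mags / phases / instfs lists initialized above.
import numpy as np
import scipy.signal as sp

def hilbert_features(imf, samplingRate):
    hx = sp.hilbert(imf)             # analytic signal
    mag = np.abs(hx)                 # instantaneous amplitude
    phase = np.unwrap(np.angle(hx))  # unwrapped instantaneous phase
    instf = np.diff(phase) * samplingRate / (2.0 * np.pi)  # instantaneous frequency (Hz)
    return mag, phase, instf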
    if i % 10 == 0 or bsz < batch_size:
        loss_count /= 10 if bsz == batch_size else i % 10
        print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' %
              (epoch, num_epochs, i, total_step, loss_count, np.exp(loss_count)))
        loss_count = 0
        tokens = banet.decoder.sample(video_encoded)
        tokens = tokens.data[0].squeeze()
        we = banet.decoder.decode_tokens(tokens)
        gt = banet.decoder.decode_tokens(captions[0].squeeze())
        print('[vid:%d]' % video_ids[0])
        print('WE: %s\nGT: %s' % (we, gt))

torch.save(banet.state_dict(), banet_pth_path)
torch.save(optimizer.state_dict(), optimizer_pth_path)

# Evaluate performance on the val set and log it.
blockPrint()
banet.eval()
metrics = evaluate(vocab, banet, test_range, test_prediction_txt_path, reference)
enablePrint()
for k, v in metrics.items():
    log_value(k, v, epoch)
    print('%s: %.6f' % (k, v))
    if k == 'METEOR' and v > best_meteor:
        # Back up the model with the best METEOR score on the val set.
        shutil.copy2(banet_pth_path, best_banet_pth_path)
        shutil.copy2(optimizer_pth_path, best_optimizer_pth_path)
        best_meteor = v
banet.train()
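# A minimal sketch of the blockPrint / enablePrint helpers called across
# these snippets (assumed implementation, not from the source): silence
# stdout by redirecting it to os.devnull, then restore the real stream.
import os
import sys

def blockPrint():
    sys.stdout = open(os.devnull, 'w')

def enablePrint():
    if sys.stdout is not sys.__stdout__:
        sys.stdout.close()  # close the devnull handle
    sys.stdout = sys.__stdout__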