def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1,
                        help="put GPU id > -1 in GPU mode", type=int)
    #parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
    parser.add_argument('--resume_training', action='store_true',
                        help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='',
                        help='The model we resume')
    parser.add_argument('--verb_module', type=str, default='',
                        help='pretrained verb module')
    parser.add_argument('--noun_module', type=str, default='',
                        help='pretrained noun module')
    parser.add_argument('--train_role', action='store_true',
                        help='cnn fix, verb fix, role train from scratch')
    parser.add_argument('--finetune_verb', action='store_true',
                        help='cnn fix, verb finetune, role train from scratch')
    parser.add_argument('--finetune_cnn', action='store_true',
                        help='cnn finetune, verb finetune, role train from scratch')
    parser.add_argument('--finetune_both', action='store_true',
                        help='cnn fix, verb finetune, role finetune')
    parser.add_argument('--output_dir', type=str, default='./trained_models',
                        help='Location to output the model')
    parser.add_argument('--evaluate', action='store_true',
                        help='Only evaluate on the dev set')
    parser.add_argument('--test', action='store_true',
                        help='Only evaluate on the test set')
    #todo: train role module separately with gt verbs

    args = parser.parse_args()

    batch_size = 640  # note: the loaders below hardcode their own batch sizes
    #lr = 5e-6
    lr = 0.0001
    lr_max = 5e-4
    lr_gamma = 0.1
    lr_step = 25
    clip_norm = 50
    weight_decay = 1e-4
    n_epoch = 500
    n_worker = 3

    dataset_folder = 'imSitu'
    imgset_folder = 'resized_256'

    print('model spec: mac net, finetune pretrained verb and role labeller')

    train_set = json.load(open(dataset_folder + "/train.json"))
    encoder = imsitu_encoder(train_set)

    model = model_mac_finetune_both.E2ENetwork(encoder, args.gpuid)

    # Group the parameters so the optimizer can treat each feature set separately.
    cnn_verb_features, cnn_noun_features, verb_features, role_features = \
        utils.group_features(model)

    train_set = imsitu_loader(imgset_folder, train_set, encoder,
                              model.train_preprocess())
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=64, shuffle=True, num_workers=n_worker)

    dev_set = json.load(open(dataset_folder + "/dev.json"))
    dev_set = imsitu_loader(imgset_folder, dev_set, encoder,
                            model.dev_preprocess())
    dev_loader = torch.utils.data.DataLoader(
        dev_set, batch_size=64, shuffle=True, num_workers=n_worker)

    test_set = json.load(open(dataset_folder + "/test.json"))
    test_set = imsitu_loader(imgset_folder, test_set, encoder,
                             model.dev_preprocess())
    test_loader = torch.utils.data.DataLoader(
        test_set, batch_size=64, shuffle=True, num_workers=n_worker)

    traindev_set = json.load(open(dataset_folder + "/dev.json"))
    traindev_set = imsitu_loader(imgset_folder, traindev_set, encoder,
                                 model.dev_preprocess())
    traindev_loader = torch.utils.data.DataLoader(
        traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)

    # Freeze everything first; the selected mode below decides what trains.
    utils.set_trainable(model, False)
    if args.train_role:
        print('CNN fix, Verb fix, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = False
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 1
        model_name = 'cfx_vfx_rtrain'

    elif args.finetune_verb:
        print('CNN fix, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 2
        model_name = 'cfx_vft_rtrain'

    elif args.finetune_cnn:
        print('CNN finetune, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 3
        model_name = 'cft_vft_rtrain'

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[resume model] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer_select = 0
        model_name = 'resume_all'

    elif args.finetune_both:
        print('finetune model from pretrained verb and noun models')
        args.train_all = True
        if len(args.verb_module) == 0 or len(args.noun_module) == 0:
            raise Exception('[pretrained verb or noun module] not specified')
        utils.load_net(args.verb_module, [model.conv_verb, model.verb],
                       ['conv', 'verb'])
        utils.load_net(args.noun_module,
                       [model.conv_noun, model.role_lookup,
                        model.verb_lookup, model.role_labeller],
                       ['conv', 'role_lookup', 'verb_lookup', 'role_labeller'])
        optimizer_select = 4
        model_name = 'finetune_both'

    else:
        print('Training from scratch.')
        optimizer_select = 0
        args.train_all = True
        model_name = 'train_full'

    optimizer = utils.get_optimizer(lr, weight_decay, optimizer_select,
                                    cnn_verb_features, cnn_noun_features,
                                    verb_features, role_features)

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    if args.gpuid >= 0:
        #print('GPU enabled')
        model.cuda()

    '''optimizer = torch.optim.Adam([{'params': model.verb.parameters(), 'lr': 5e-5},
                                     {'params': model.role_lookup.parameters(), 'lr': 5e-5},
                                     {'params': model.verb_lookup.parameters(), 'lr': 5e-5},
                                     {'params': model.role_labeller.parameters(), 'lr': 5e-5}])'''
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)
    #gradient clipping, grad check
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    if args.evaluate:
        top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid,
                                    write_to_file=True)

        top1_avg = top1.get_average_results()
        top5_avg = top5.get_average_results()

        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + \
                    top5_avg["verb"] + top5_avg["value"] + top5_avg["value-all"] + \
                    top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8

        print('Dev average :{:.2f} {} {}'.format(
            avg_score * 100,
            utils.format_dict(top1_avg, '{:.2f}', '1-'),
            utils.format_dict(top5_avg, '{:.2f}', '5-')))

        #write results to json files
        role_dict = top1.role_dict
        fail_val_all = top1.value_all_dict
        pass_val_dict = top1.vall_all_correct

        with open('role_pred_data.json', 'w') as fp:
            json.dump(role_dict, fp, indent=4)

        with open('fail_val_all.json', 'w') as fp:
            json.dump(fail_val_all, fp, indent=4)

        with open('pass_val_all.json', 'w') as fp:
            json.dump(pass_val_dict, fp, indent=4)

        print('Writing predictions to file completed!')

    elif args.test:
        top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid,
                                    write_to_file=True)

        top1_avg = top1.get_average_results()
        top5_avg = top5.get_average_results()

        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + \
                    top5_avg["verb"] + top5_avg["value"] + top5_avg["value-all"] + \
                    top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8

        print('Test average :{:.2f} {} {}'.format(
            avg_score * 100,
            utils.format_dict(top1_avg, '{:.2f}', '1-'),
            utils.format_dict(top5_avg, '{:.2f}', '5-')))

    else:
        print('Model training started!')
        train(model, train_loader, dev_loader, traindev_loader, optimizer,
              scheduler, n_epoch, args.output_dir, encoder, args.gpuid,
              clip_norm, lr_max, model_name, args)
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1,
                        help="put GPU id > -1 in GPU mode", type=int)
    #parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
    parser.add_argument('--resume_training', action='store_true',
                        help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='',
                        help='The model we resume')
    parser.add_argument('--verb_module', type=str, default='',
                        help='pretrained verb module')
    parser.add_argument('--train_role', action='store_true',
                        help='cnn fix, verb fix, role train from scratch')
    parser.add_argument('--finetune_verb', action='store_true',
                        help='cnn fix, verb finetune, role train from scratch')
    parser.add_argument('--finetune_cnn', action='store_true',
                        help='cnn finetune, verb finetune, role train from scratch')
    parser.add_argument('--output_dir', type=str, default='./trained_models',
                        help='Location to output the model')
    #todo: train role module separately with gt verbs

    args = parser.parse_args()

    batch_size = 640
    #lr = 5e-6
    lr = 0.0001
    lr_max = 5e-4
    lr_gamma = 0.1
    lr_step = 25
    clip_norm = 50
    weight_decay = 1e-4
    n_epoch = 500
    n_worker = 4

    dataset_folder = 'imSitu'
    imgset_folder = 'resized_256'

    print('model spec: gmac net, verb pred for training, loss normalized by matching role count only')

    train_set = json.load(open(dataset_folder + "/train.json"))
    encoder = imsitu_encoder(train_set)

    model = gmac_model_with_verb.E2ENetwork(encoder, args.gpuid)

    # Group the parameters so the optimizer can treat each feature set separately.
    cnn_features, verb_features, role_features = utils.group_features(model)

    train_set = imsitu_loader(imgset_folder, train_set, encoder,
                              model.train_preprocess())
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=64, shuffle=True, num_workers=n_worker)

    dev_set = json.load(open(dataset_folder + "/dev.json"))
    dev_set = imsitu_loader(imgset_folder, dev_set, encoder,
                            model.dev_preprocess())
    dev_loader = torch.utils.data.DataLoader(
        dev_set, batch_size=64, shuffle=True, num_workers=n_worker)

    traindev_set = json.load(open(dataset_folder + "/dev.json"))
    traindev_set = imsitu_loader(imgset_folder, traindev_set, encoder,
                                 model.dev_preprocess())
    traindev_loader = torch.utils.data.DataLoader(
        traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)

    # Freeze everything first; the selected mode below decides what trains.
    utils.set_trainable(model, False)
    if args.train_role:
        print('CNN fix, Verb fix, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = False
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 1
        model_name = 'cfx_vfx_rtrain'

    elif args.finetune_verb:
        print('CNN fix, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 2
        model_name = 'cfx_vft_rtrain'

    elif args.finetune_cnn:
        print('CNN finetune, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 3
        model_name = 'cft_vft_rtrain'

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[resume model] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer_select = 0
        model_name = 'resume_all'

    else:
        print('Training from scratch.')
        optimizer_select = 0
        args.train_all = True
        model_name = 'train_full'

    optimizer = utils.get_optimizer(lr, weight_decay, optimizer_select,
                                    cnn_features, verb_features, role_features)

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    if args.gpuid >= 0:
        #print('GPU enabled')
        model.cuda()

    # Hand-built optimizer; this overrides the one from utils.get_optimizer above.
    # The CNN gets a smaller lr (5e-5) while verb and role modules use 1e-3.
    optimizer = torch.optim.Adam([
        {'params': model.conv.parameters(), 'lr': 5e-5},
        {'params': model.verb.parameters()},
        {'params': model.role_labeller.parameters()}
    ], lr=1e-3)
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    #scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step, gamma=lr_gamma)
    #gradient clipping, grad check
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    print('Model training started!')
    train(model, train_loader, dev_loader, traindev_loader, optimizer,
          scheduler, n_epoch, args.output_dir, encoder, args.gpuid,
          clip_norm, lr_max, model_name, args)
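# For reference: ExponentialLR(gamma=0.9) above multiplies every parameter
# group's lr by 0.9 on each scheduler.step(), i.e. lr_t = lr_0 * 0.9 ** t after
# t epochs. A self-contained demo of that behavior (illustrative only, using a
# dummy parameter; not part of the training pipeline):

def _exp_lr_decay_demo():
    import torch
    p = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.Adam([p], lr=1e-3)
    sched = torch.optim.lr_scheduler.ExponentialLR(opt, gamma=0.9)
    for _ in range(3):
        opt.step()
        sched.step()
    # opt.param_groups[0]['lr'] is now 1e-3 * 0.9 ** 3 = 7.29e-4
    return opt.param_groups[0]['lr']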
        model.load_state_dict(torch.load(args.weights_file))

        dataset_train = imSituSituation(args.image_dir, train_set, encoder,
                                        model.train_preprocess())
        dataset_dev = imSituSituation(args.image_dir, dev_set, encoder,
                                      model.dev_preprocess())

        ngpus = 1
        device_array = [i for i in range(0, ngpus)]
        #batch_size = args.batch_size*ngpus
        batch_size = 1

        train_loader = torch.utils.data.DataLoader(
            dataset_train, batch_size=batch_size, shuffle=True, num_workers=1)
        dev_loader = torch.utils.data.DataLoader(
            dataset_dev, batch_size=batch_size, shuffle=True, num_workers=1)

        model.cuda()
        # need to keep the faster-rcnn params fixed
        frcnn_features, crf_features = group_features(model)
        optimizer = network.get_optimizer_dvsrl(args.learning_rate, 0, 1, args,
                                                frcnn_features, crf_features,
                                                args.weight_decay)
        #optimizer = optim.Adam(model.parameters(), lr = args.learning_rate, weight_decay = args.weight_decay)
        train_model(args.training_epochs, args.eval_frequency, train_loader,
                    dev_loader, model, encoder, optimizer, args.output_dir)

    elif args.command == "eval":
        print("command = evaluating")
        eval_file = json.load(open(args.dataset_dir + "/" + args.eval_file))

        if args.encoding_file is None:
            print("expecting encoder file to run evaluation")
            exit()
        else:
            encoder = torch.load(args.encoding_file)

        print("creating model...")
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1,
                        help="put GPU id > -1 in GPU mode", type=int)
    #parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
    parser.add_argument('--resume_training', action='store_true',
                        help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='',
                        help='The model we resume')
    parser.add_argument('--verb_module', type=str, default='',
                        help='pretrained verb module')
    parser.add_argument('--train_role', action='store_true',
                        help='cnn fix, verb fix, role train from scratch')
    parser.add_argument('--finetune_verb', action='store_true',
                        help='cnn fix, verb finetune, role train from scratch')
    parser.add_argument('--finetune_cnn', action='store_true',
                        help='cnn finetune, verb finetune, role train from scratch')
    parser.add_argument('--output_dir', type=str, default='./trained_models',
                        help='Location to output the model')
    parser.add_argument('--evaluate', action='store_true',
                        help='Only evaluate on the dev set')
    #todo: train role module separately with gt verbs

    args = parser.parse_args()

    batch_size = 640
    #lr = 5e-6
    lr = 0.0001
    lr_max = 5e-4
    lr_gamma = 0.1
    lr_step = 25
    clip_norm = 50
    weight_decay = 1e-4
    n_epoch = 500
    n_worker = 3

    dataset_folder = 'imSitu'
    imgset_folder = 'resized_256'

    train_set = json.load(open(dataset_folder + "/train.json"))
    encoder = imsitu_encoder(train_set)

    model = model_vsrl_small_finetune.RelationNetworks(encoder, args.gpuid)

    # Group the parameters so the optimizer can treat each feature set separately.
    cnn_features, verb_features, role_features = utils.group_features(model)

    train_set = imsitu_loader(imgset_folder, train_set, encoder,
                              model.train_preprocess())
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=32, shuffle=True, num_workers=n_worker)

    # note: dev and traindev reuse train_preprocess() in this script
    dev_set = json.load(open(dataset_folder + "/dev.json"))
    dev_set = imsitu_loader(imgset_folder, dev_set, encoder,
                            model.train_preprocess())
    dev_loader = torch.utils.data.DataLoader(
        dev_set, batch_size=32, shuffle=True, num_workers=n_worker)

    traindev_set = json.load(open(dataset_folder + "/dev.json"))
    traindev_set = imsitu_loader(imgset_folder, traindev_set, encoder,
                                 model.train_preprocess())
    traindev_loader = torch.utils.data.DataLoader(
        traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)

    # Freeze everything first; the selected mode below decides what trains.
    utils.set_trainable(model, False)
    if args.train_role:
        print('CNN fix, Verb fix, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = False
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 1
        model_name = 'cfx_vfx_rtrain'

    elif args.finetune_verb:
        print('CNN fix, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 2
        model_name = 'cfx_vft_rtrain'

    elif args.finetune_cnn:
        print('CNN finetune, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 3
        model_name = 'cft_vft_rtrain'

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[resume model] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer_select = 0
        model_name = 'resume_all'

    else:
        if not args.evaluate:
            print('Training from scratch.')
        optimizer_select = 0
        args.train_all = True
        model_name = 'train_full'

    optimizer = utils.get_optimizer(lr, weight_decay, optimizer_select,
                                    cnn_features, verb_features, role_features)

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    if args.gpuid >= 0:
        #print('GPU enabled')
        model.cuda()

    #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step,
                                                gamma=lr_gamma)
    #gradient clipping, grad check

    if args.evaluate:
        top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid,
                                    write_to_file=True)

        top1_avg = top1.get_average_results()
        top5_avg = top5.get_average_results()

        # this eval reports six metrics (no gt-verb scores), so average over 6
        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + \
                    top5_avg["verb"] + top5_avg["value"] + top5_avg["value-all"]
        avg_score /= 6

        print('Dev average :{:.2f} {} {}'.format(
            avg_score * 100,
            utils.format_dict(top1_avg, '{:.2f}', '1-'),
            utils.format_dict(top5_avg, '{:.2f}', '5-')))

        #write results to csv files
        gt_labels = top1.gt_situation
        pred_labels = top1.predicted_situation
        verb_pred = top1.verb_pred

        with open("gt_rn_only.csv", "w") as f:
            writer = csv.writer(f)
            writer.writerows(gt_labels)

        with open("pred_rn_only.csv", "w") as f:
            writer = csv.writer(f)
            writer.writerows(pred_labels)

        with open("verbpred_rn_only.csv", "w") as f:
            writer = csv.writer(f)
            writer.writerow(['verb', 'total', 'predicted'])
            for key, value in verb_pred.items():
                writer.writerow([key, value[0], value[1]])

        print('Writing predictions to file completed!')

    else:
        print('Model training started!')
        train(model, train_loader, dev_loader, traindev_loader, optimizer,
              scheduler, n_epoch, args.output_dir, encoder, args.gpuid,
              clip_norm, lr_max, model_name, args)
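# For reference: each row of verbpred_rn_only.csv above is [verb, total,
# predicted]. A small helper (hypothetical, not part of the repo) that turns
# the same verb_pred dict into per-verb accuracies:

def per_verb_accuracy(verb_pred):
    # verb_pred maps verb -> [total, predicted], as written to the CSV above.
    return {verb: (counts[1] / counts[0] if counts[0] else 0.0)
            for verb, counts in verb_pred.items()}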
def main():
    import argparse
    parser = argparse.ArgumentParser(
        description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1,
                        help="put GPU id > -1 in GPU mode", type=int)
    #parser.add_argument("--command", choices = ["train", "eval", "resume", 'predict'], required = True)
    parser.add_argument('--resume_training', action='store_true',
                        help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='',
                        help='The model we resume')
    parser.add_argument('--verb_module', type=str, default='',
                        help='pretrained verb module')
    parser.add_argument('--train_role', action='store_true',
                        help='cnn fix, verb fix, role train from scratch')
    parser.add_argument('--finetune_verb', action='store_true',
                        help='cnn fix, verb finetune, role train from scratch')
    parser.add_argument('--finetune_cnn', action='store_true',
                        help='cnn finetune, verb finetune, role train from scratch')
    parser.add_argument('--output_dir', type=str, default='./trained_models',
                        help='Location to output the model')
    #todo: train role module separately with gt verbs

    args = parser.parse_args()

    batch_size = 640
    #lr = 5e-6
    lr = 0
    lr_max = 5e-4
    lr_gamma = 0.1
    lr_step = 25
    clip_norm = 50
    weight_decay = 1e-4
    n_epoch = 500
    n_worker = 3

    dataset_folder = 'imSitu'
    imgset_folder = 'resized_256'

    print('model spec: 256 hidden, 1e-4 init lr, 25 epoch decay, 4 layer mlp for g, '
          '2 mlp f1, 3 att layers with res connections, param init xavier uni, '
          '2 heads, dropout 0.5, mask, 6loss, maskb4g, transformopt')

    train_set = json.load(open(dataset_folder + "/train.json"))
    encoder = imsitu_encoder(train_set)

    model = model_vsrl_finetune_selfatt_ff.RelationNetworks(encoder, args.gpuid)

    # Group the parameters so the optimizer can treat each feature set separately.
    cnn_features, verb_features, role_features = utils.group_features(model)

    train_set = imsitu_loader(imgset_folder, train_set, encoder,
                              model.train_preprocess())
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=24, shuffle=True, num_workers=n_worker)

    # note: dev and traindev reuse train_preprocess() in this script
    dev_set = json.load(open(dataset_folder + "/dev.json"))
    dev_set = imsitu_loader(imgset_folder, dev_set, encoder,
                            model.train_preprocess())
    dev_loader = torch.utils.data.DataLoader(
        dev_set, batch_size=24, shuffle=True, num_workers=n_worker)

    traindev_set = json.load(open(dataset_folder + "/dev.json"))
    traindev_set = imsitu_loader(imgset_folder, traindev_set, encoder,
                                 model.train_preprocess())
    traindev_loader = torch.utils.data.DataLoader(
        traindev_set, batch_size=8, shuffle=True, num_workers=n_worker)

    # Freeze everything first; the selected mode below decides what trains.
    utils.set_trainable(model, False)
    if args.train_role:
        print('CNN fix, Verb fix, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = False
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 1
        model_name = 'cfx_vfx_rtrain'

    elif args.finetune_verb:
        print('CNN fix, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 2
        model_name = 'cfx_vft_rtrain'

    elif args.finetune_cnn:
        print('CNN finetune, Verb finetune, train role from scratch from: {}'.format(
            args.verb_module))
        args.train_all = True
        if len(args.verb_module) == 0:
            raise Exception('[pretrained verb module] not specified')
        utils.load_net(args.verb_module, [model.conv, model.verb],
                       ['conv', 'verb'])
        optimizer_select = 3
        model_name = 'cft_vft_rtrain'

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[resume model] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer_select = 0
        model_name = 'resume_all'

    else:
        print('Training from scratch.')
        optimizer_select = 0
        args.train_all = True
        model_name = 'train_full'

    optimizer = utils.get_optimizer(lr, weight_decay, optimizer_select,
                                    cnn_features, verb_features, role_features)

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    if args.gpuid >= 0:
        #print('GPU enabled')
        model.cuda()

    # Noam warmup schedule drives the learning rate; see the sketch below.
    opt = utils.NoamOpt(256, 1, 4000, optimizer)
    #optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_step,
                                                gamma=lr_gamma)
    #gradient clipping, grad check

    print('Model training started!')
    train(model, train_loader, dev_loader, traindev_loader, opt, scheduler,
          n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, lr_max,
          model_name, args)
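# For reference: utils.NoamOpt(256, 1, 4000, optimizer) is assumed here to
# implement the Transformer warmup schedule of Vaswani et al. (2017), wrapping
# the optimizer and resetting its lr on every step. A minimal sketch of that
# rate formula (model_size=256, factor=1, warmup=4000 match the call above):

def noam_rate(step, model_size=256, factor=1, warmup=4000):
    # For step >= 1: lr grows linearly for `warmup` steps, then decays
    # proportionally to step ** -0.5; the peak is reached at step == warmup.
    return factor * model_size ** -0.5 * min(step ** -0.5,
                                             step * warmup ** -1.5)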