def main(): global args print "Loading training set and testing set..." train_set = visual_genome(args.dataset_option, 'train') test_set = visual_genome('small', 'test') print "Done." train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=8, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=8, pin_memory=True) net = RPN(not args.use_normal_anchors) if args.resume_training: print 'Resume training from: {}'.format(args.resume_model) if len(args.resume_model) == 0: raise Exception('[resume_model] not specified') network.load_net(args.resume_model, net) optimizer = torch.optim.SGD([ {'params': list(net.parameters())[26:]}, ], lr=args.lr, momentum=args.momentum, weight_decay=0.0005) else: print 'Training from scratch...Initializing network...' optimizer = torch.optim.SGD(list(net.parameters())[26:], lr=args.lr, momentum=args.momentum, weight_decay=0.0005) network.set_trainable(net.features, requires_grad=False) net.cuda() if not os.path.exists(args.output_dir): os.mkdir(args.output_dir) best_recall = 0.0 for epoch in range(0, args.max_epoch): # Training # train(train_loader, net, optimizer, epoch) # Testing recall, RPN_precision, RPN_recall = test(test_loader, net) print('Epoch[{epoch:d}]: ' 'Recall: ' 'object: {recall: .3f}%% (Best: {best_recall: .3f}%%)'.format( epoch = epoch, recall=recall * 100, best_recall=best_recall * 100)) print('object: {precision: .3f}%% ' 'object: {recall: .3f}%% '.format(precision=RPN_precision*100, recall=RPN_recall*100)) # update learning rate if epoch % args.step_size == 0: args.disable_clip_gradient = True args.lr /= 10 for param_group in optimizer.param_groups: param_group['lr'] = args.lr save_name = os.path.join(args.output_dir, '{}_epoch_{}.h5'.format(args.model_name, epoch)) network.save_net(save_name, net) print('save model: {}'.format(save_name)) if np.all(recall > best_recall): best_recall = recall save_name = os.path.join(args.output_dir, '{}_best.h5'.format(args.model_name, epoch)) network.save_net(save_name, net)
        )
        re_cnt = True

        if use_tensorboard and step % log_interval == 0:
            exp.add_scalar_value('train_loss', train_loss / step_cnt, step=step)
            exp.add_scalar_value('learning_rate', lr, step=step)
            if _DEBUG:
                exp.add_scalar_value('true_positive', tp / fg * 100., step=step)
                exp.add_scalar_value('true_negative', tf / bg * 100., step=step)
            losses = {'rpn_cls': float(net.rpn.cross_entropy.data.cpu().numpy()[0]),
                      'rpn_box': float(net.rpn.loss_box.data.cpu().numpy()[0]),
                      'rcnn_cls': float(net.cross_entropy.data.cpu().numpy()[0]),
                      'rcnn_box': float(net.loss_box.data.cpu().numpy()[0])}
            exp.add_scalar_dict(losses, step=step)

        # Periodic snapshot
        if (step % 10000 == 0) and step > 0:
            save_name = os.path.join(output_dir, 'faster_rcnn_{}.h5'.format(step))
            network.save_net(save_name, net)
            print('save model: {}'.format(save_name))

        # Step-wise learning rate decay: rebuild the optimizer at each decay step
        if step in lr_decay_steps:
            lr *= lr_decay
            optimizer = torch.optim.SGD(params[8:], lr=lr, momentum=momentum,
                                        weight_decay=weight_decay)

        # Reset the running counters after each logging interval
        if re_cnt:
            tp, tf, fg, bg = 0., 0., 0, 0
            train_loss = 0
            step_cnt = 0
            t.tic()
            re_cnt = False
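# Illustrative sketch (assumption): network.set_trainable, used throughout
# these scripts to freeze or unfreeze submodules, can be as simple as toggling
# requires_grad on every parameter:
def set_trainable_sketch(model, requires_grad):
    for param in model.parameters():
        param.requires_grad = requires_grad
# e.g. set_trainable_sketch(net.features, requires_grad=False) freezes the
# shared convolutional backbone so the optimizer never updates it.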
def main():
    global args, optimizer_select
    # To set the model name automatically
    print(args)
    lr = args.lr
    args = get_model_name(args)
    print('Model name: {}'.format(args.model_name))

    # To set the random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed + 1)
    torch.cuda.manual_seed(args.seed + 2)

    print("Loading training set and testing set...")
    train_set = visual_genome(args.dataset_option, 'train')
    test_set = visual_genome(args.dataset_option, 'test')
    print("Done.")
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=1,
                                               shuffle=True, num_workers=8,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1,
                                              shuffle=False, num_workers=8,
                                              pin_memory=True)

    net = Hierarchical_Descriptive_Model(
        nhidden=args.mps_feature_len,
        n_object_cats=train_set.num_object_classes,
        n_predicate_cats=train_set.num_predicate_classes,
        MPS_iter=args.MPS_iter,
        object_loss_weight=train_set.inverse_weight_object,
        predicate_loss_weight=train_set.inverse_weight_predicate,
        dropout=args.dropout,
        use_kmeans_anchors=args.use_kmeans_anchors,
        base_model=args.base_model)
    print(net)

    # Setting the state of the training model
    net.cuda()
    net.train()
    network.set_trainable(net, False)
    # network.weights_normal_init(net, dev=0.01)

    if args.resume_model:
        print('Resume training from: {}'.format(args.HDN_model))
        if len(args.HDN_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.HDN_model, net)
        args.train_all = True
        optimizer_select = 3
    elif args.load_RCNN:
        print('Loading pretrained RCNN: {}'.format(args.RCNN_model))
        args.train_all = False
        network.load_net(args.RCNN_model, net.rcnn)
        optimizer_select = 2
    elif args.load_RPN:
        print('Loading pretrained RPN: {}'.format(args.RPN_model))
        args.train_all = False
        network.load_net(args.RPN_model, net.rpn)
        net.reinitialize_fc_layers()
        optimizer_select = 1
    else:
        print('Training from scratch.')
        net.rpn.initialize_parameters()
        net.reinitialize_fc_layers()
        optimizer_select = 0
        args.train_all = True

    # To group up the features
    basenet_features, rpn_features, rcnn_features, hdn_features = group_features(net)
    optimizer = network.get_optimizer(lr, optimizer_select, args,
                                      basenet_features, rpn_features,
                                      rcnn_features, hdn_features)

    target_net = net
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    top_Ns = [50, 100]
    best_recall = np.zeros(len(top_Ns))

    if args.evaluate:
        recall = test(test_loader, target_net, top_Ns, train_set.object_classes)
        print('======= Testing Result =======')
        for idx, top_N in enumerate(top_Ns):
            print('[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'.format(
                top_N=top_N, recall=recall[idx] * 100,
                best_recall=best_recall[idx] * 100))
        print('==============================')
    else:
        for epoch in range(args.max_epoch):
            # Training
            train(train_loader, target_net, optimizer, epoch)

            # Snapshot the state
            save_name = os.path.join(args.output_dir,
                                     '{}_epoch_{}.h5'.format(args.model_name, epoch))
            network.save_net(save_name, net)
            print('save model: {}'.format(save_name))

            # Testing
            recall = test(test_loader, target_net, top_Ns, train_set.object_classes)
            if np.all(recall > best_recall):
                best_recall = recall
                save_name = os.path.join(args.output_dir,
                                         '{}_best.h5'.format(args.model_name))
                network.save_net(save_name, net)
                print('\nsave model: {}'.format(save_name))

            print('Epoch[{epoch:d}]:'.format(epoch=epoch))
            for idx, top_N in enumerate(top_Ns):
                print('\t[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'.format(
                    top_N=top_N, recall=recall[idx] * 100,
                    best_recall=best_recall[idx] * 100))

            # Updating the learning policy
            if (epoch + 1) % args.step_size == 0 or \
                    (epoch + 1) % (args.step_size + 2) == 0:
                lr /= 10
                args.lr = lr
                print('[learning rate: {}]'.format(lr))
                args.enable_clip_gradient = False
                args.train_all = False
                optimizer_select = 2
                # Update the optimizer and the corresponding requires_grad state
                optimizer = network.get_optimizer(lr, optimizer_select, args,
                                                  basenet_features, rpn_features,
                                                  rcnn_features, hdn_features)
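# Illustrative sketch (assumption): one plausible shape for
# network.get_optimizer, which maps optimizer_select to the parameter groups
# that actually receive gradients (0 = scratch, 1 = pretrained RPN loaded,
# 2 = pretrained RCNN loaded / upper layers only, 3 = full resume). The real
# grouping and any per-group learning-rate multipliers live in network.py.
import torch


def get_optimizer_sketch(lr, select, args, basenet_features, rpn_features,
                         rcnn_features, hdn_features):
    if select == 0:    # from scratch: train everything
        groups = basenet_features + rpn_features + rcnn_features + hdn_features
    elif select == 1:  # RPN is pretrained: keep it and the backbone frozen
        groups = rcnn_features + hdn_features
    elif select == 2:  # RCNN is pretrained: train only the HDN layers
        groups = hdn_features
    else:              # resume: everything is trainable again
        groups = basenet_features + rpn_features + rcnn_features + hdn_features
    return torch.optim.SGD(groups, lr=lr, momentum=args.momentum,
                           weight_decay=0.0005)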
def main():
    global args, optimizer_select
    # To set the model name automatically
    print(args)
    lr = args.lr
    args = get_model_name(args)
    print('Model name: {}'.format(args.model_name))

    # To set the random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed + 1)
    torch.cuda.manual_seed(args.seed + 2)

    print("Loading training set and testing set...")
    train_set = visual_genome(args.dataset_option, 'train')
    test_set = visual_genome('small', 'test')
    print("Done.")
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=1,
                                               shuffle=True, num_workers=8,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1,
                                              shuffle=False, num_workers=8,
                                              pin_memory=True)

    # Model declaration
    net = Hierarchical_Descriptive_Model(
        nhidden=args.mps_feature_len,
        n_object_cats=train_set.num_object_classes,
        n_predicate_cats=train_set.num_predicate_classes,
        n_vocab=train_set.voc_size,
        voc_sign=train_set.voc_sign,
        max_word_length=train_set.max_size,
        MPS_iter=args.MPS_iter,
        use_language_loss=not args.disable_language_model,
        object_loss_weight=train_set.inverse_weight_object,
        predicate_loss_weight=train_set.inverse_weight_predicate,
        dropout=args.dropout,
        use_kmeans_anchors=not args.use_normal_anchors,
        gate_width=args.gate_width,
        nhidden_caption=args.nhidden_caption,
        nembedding=args.nembedding,
        rnn_type=args.rnn_type,
        rnn_droptout=args.caption_use_dropout,  # kwarg name follows the model's signature
        rnn_bias=args.caption_use_bias,
        use_region_reg=args.region_bbox_reg,
        use_kernel=args.use_kernel_function)
    for param in net.parameters():
        print(param.size())
    print(net)

    # To group up the features
    vgg_features_fix, vgg_features_var, rpn_features, hdn_features, language_features = \
        group_features(net)

    # Setting the state of the training model
    net.cuda()
    net.train()

    # Setting up the logger (see the tensorboard_logger sketch after this function)
    logger_path = "log/logger/{}".format(args.model_name)
    if os.path.exists(logger_path):
        shutil.rmtree(logger_path)
    configure(logger_path, flush_secs=5)

    network.set_trainable(net, False)
    # network.weights_normal_init(net, dev=0.01)

    if args.finetune_language_model:
        print('Only finetuning the language model from: {}'.format(args.resume_model))
        args.train_all = False
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        optimizer_select = 3
    elif args.load_RPN:
        print('Loading pretrained RPN: {}'.format(args.saved_model_path))
        args.train_all = False
        network.load_net(args.saved_model_path, net.rpn)
        net.reinitialize_fc_layers()
        optimizer_select = 1
    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        args.train_all = True
        optimizer_select = 2
    else:
        print('Training from scratch.')
        net.rpn.initialize_parameters()
        net.reinitialize_fc_layers()
        optimizer_select = 0
        args.train_all = True

    optimizer = network.get_optimizer(lr, optimizer_select, args,
                                      vgg_features_var, rpn_features,
                                      hdn_features, language_features)
    target_net = net
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    top_Ns = [50, 100]
    best_recall = np.zeros(len(top_Ns))

    if args.evaluate:
        recall = test(test_loader, net, top_Ns)
        print('======= Testing Result =======')
        for idx, top_N in enumerate(top_Ns):
            print('[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'.format(
                top_N=top_N, recall=recall[idx] * 100,
                best_recall=best_recall[idx] * 100))
        print('==============================')
    else:
        for epoch in range(args.max_epoch):
            # Training
            train(train_loader, target_net, optimizer, epoch)

            # Snapshot the state
            save_name = os.path.join(args.output_dir,
                                     '{}_epoch_{}.h5'.format(args.model_name, epoch))
            network.save_net(save_name, net)
            print('save model: {}'.format(save_name))

            # Testing
            # network.set_trainable(net, False)  # Without backward(), requires_grad takes no effect
            recall = test(test_loader, net, top_Ns)
            if np.all(recall > best_recall):
                best_recall = recall
                save_name = os.path.join(args.output_dir,
                                         '{}_best.h5'.format(args.model_name))
                network.save_net(save_name, net)
                print('\nsave model: {}'.format(save_name))

            print('Epoch[{epoch:d}]:'.format(epoch=epoch))
            for idx, top_N in enumerate(top_Ns):
                print('\t[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'.format(
                    top_N=top_N, recall=recall[idx] * 100,
                    best_recall=best_recall[idx] * 100))

            # Updating the learning policy
            if epoch % args.step_size == 0 and epoch > 0:
                lr /= 10
                args.lr = lr
                print('[learning rate: {}]'.format(lr))
                args.enable_clip_gradient = False
                if not args.finetune_language_model:
                    args.train_all = True
                    optimizer_select = 2
                # Update the optimizer and the corresponding requires_grad state
                optimizer = network.get_optimizer(lr, optimizer_select, args,
                                                  vgg_features_var, rpn_features,
                                                  hdn_features, language_features)
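# Illustrative sketch (assumption): the configure() call in main() above comes
# from the tensorboard_logger package; once configured, train() can record
# scalars with log_value(). The run name and values below are hypothetical.
from tensorboard_logger import configure, log_value

configure('log/logger/example_run', flush_secs=5)
for demo_step in range(3):
    log_value('train_loss', 1.0 / (demo_step + 1), demo_step)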
def main(): global args print "Loading training set and testing set..." train_set = visual_genome(args.dataset_option, 'train') test_set = visual_genome(args.dataset_option, 'test') object_classes = test_set.object_classes print "Done." train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=8, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=8, pin_memory=True) net = FasterRCNN(nhidden=args.mps_feature_len, use_kmeans_anchors=args.use_kmeans_anchors, n_classes=len(object_classes), model=args.base_model) if args.resume_model: print 'Resume training from: {}'.format(args.resume_model) if len(args.resume_model) == 0: raise Exception('[resume_model] not specified') network.load_net(args.detection_model, net) # optimizer = torch.optim.SGD([ # {'params': list(net.parameters())}, # ], lr=args.lr, momentum=args.momentum, weight_decay=0.0005) else: print 'Training from scratch...Initializing network...' optimizer = torch.optim.SGD(list(net.parameters()), lr=args.lr, momentum=args.momentum, weight_decay=0.0005) # network.set_trainable(net.features, requires_grad=True) net.cuda() if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) best_map = 0.0 for epoch in range(0, args.max_epoch): # Training train(train_loader, net, optimizer, epoch) # update learning rate if epoch % args.step_size == args.step_size - 1: args.clip_gradient = False args.lr /= 5 for param_group in optimizer.param_groups: param_group['lr'] = args.lr save_name = os.path.join( args.output_dir, '{}_epoch_{}.h5'.format(args.model_name, epoch)) network.save_net(save_name, net) print('save model: {}'.format(save_name)) try: # Testing map = evaluate(test_loader, net, object_classes) print( 'Epoch[{epoch:d}]: ' 'Recall: ' 'object: {map: .3f}%% (Best: {best_map: .3f}%%)'.format( epoch=epoch, map=map * 100, best_map=best_map * 100)) if map > best_map: best_map = map save_name = os.path.join( args.output_dir, '{}_best.h5'.format(args.model_name, epoch)) network.save_net(save_name, net) except: continue