Example #1
def main():
    global args
    print "Loading training set and testing set..."
    train_set = visual_genome(args.dataset_option, 'train')
    test_set = visual_genome('small', 'test')
    print "Done."

    train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=8, pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=8, pin_memory=True)
    net = RPN(not args.use_normal_anchors)
    if args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
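        # The slice below presumably skips the frozen VGG feature layers,
        # whose parameters come first in net.parameters(); treat the
        # offset 26 as project-specific.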
        optimizer = torch.optim.SGD([
                {'params': list(net.parameters())[26:]}, 
                ], lr=args.lr, momentum=args.momentum, weight_decay=0.0005)
    else:
        print('Training from scratch... Initializing network...')
        optimizer = torch.optim.SGD(list(net.parameters())[26:], lr=args.lr, momentum=args.momentum, weight_decay=0.0005)

    network.set_trainable(net.features, requires_grad=False)
    net.cuda()

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    best_recall = 0.0
    
    for epoch in range(0, args.max_epoch):
        
        # Training
        # train(train_loader, net, optimizer, epoch)

        # Testing
        recall, RPN_precision, RPN_recall = test(test_loader, net)
        print('Epoch[{epoch:d}]: '
              'Recall: '
              'object: {recall: .3f}% (Best: {best_recall: .3f}%)'.format(
               epoch=epoch, recall=recall * 100, best_recall=best_recall * 100))
        print('RPN precision: {precision: .3f}% '
              'RPN recall: {recall: .3f}%'.format(precision=RPN_precision * 100, recall=RPN_recall * 100))

        # update learning rate
        if epoch % args.step_size == 0 and epoch > 0:
            args.disable_clip_gradient = True
            args.lr /= 10
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        save_name = os.path.join(args.output_dir, '{}_epoch_{}.h5'.format(args.model_name, epoch))
        network.save_net(save_name, net)
        print('save model: {}'.format(save_name))

        if np.all(recall > best_recall):
            best_recall = recall
            save_name = os.path.join(args.output_dir, '{}_best.h5'.format(args.model_name))
            network.save_net(save_name, net)
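
The network.set_trainable, network.save_net, and network.load_net helpers are used throughout these examples but never shown. A minimal sketch of what they plausibly look like, assuming the common h5py-based checkpoint helpers found in Faster R-CNN-style PyTorch codebases (the project's actual network module may differ):

import h5py
import numpy as np
import torch


def set_trainable(model, requires_grad):
    # Toggle gradient computation for every parameter of a (sub)module.
    for param in model.parameters():
        param.requires_grad = requires_grad


def save_net(fname, net):
    # Store each state_dict entry as one HDF5 dataset.
    with h5py.File(fname, mode='w') as h5f:
        for k, v in net.state_dict().items():
            h5f.create_dataset(k, data=v.cpu().numpy())


def load_net(fname, net):
    # Copy each HDF5 dataset back into the matching parameter in place.
    with h5py.File(fname, mode='r') as h5f:
        for k, v in net.state_dict().items():
            v.copy_(torch.from_numpy(np.asarray(h5f[k])))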
Example #2
    def __init__(self, bn=False):
        super(VGG16, self).__init__()

        self.conv1 = nn.Sequential(Conv2d(3, 64, 3, same_padding=True, bn=bn),
                                   Conv2d(64, 64, 3, same_padding=True, bn=bn),
                                   nn.MaxPool2d(2))
        self.conv2 = nn.Sequential(
            Conv2d(64, 128, 3, same_padding=True, bn=bn),
            Conv2d(128, 128, 3, same_padding=True, bn=bn), nn.MaxPool2d(2))
        network.set_trainable(self.conv1, requires_grad=False)
        network.set_trainable(self.conv2, requires_grad=False)

        self.conv3 = nn.Sequential(
            Conv2d(128, 256, 3, same_padding=True, bn=bn),
            Conv2d(256, 256, 3, same_padding=True, bn=bn),
            Conv2d(256, 256, 3, same_padding=True, bn=bn), nn.MaxPool2d(2))
        self.conv4 = nn.Sequential(
            Conv2d(256, 512, 3, same_padding=True, bn=bn),
            Conv2d(512, 512, 3, same_padding=True, bn=bn),
            Conv2d(512, 512, 3, same_padding=True, bn=bn), nn.MaxPool2d(2))
        self.conv5 = nn.Sequential(
            Conv2d(512, 512, 3, same_padding=True, bn=bn),
            Conv2d(512, 512, 3, same_padding=True, bn=bn),
            Conv2d(512, 512, 3, same_padding=True, bn=bn))
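
Conv2d here is not torch.nn.Conv2d but a project-level wrapper that bundles "same" padding, optional batch normalization, and a ReLU. A plausible sketch of such a wrapper, assuming the conventional conv-bn-relu layout (the project's own definition may differ in details such as the default activation):

import torch.nn as nn


class Conv2d(nn.Module):
    # conv -> optional batch norm -> optional ReLU; "same" padding is
    # derived from the kernel size so spatial dimensions are preserved.
    def __init__(self, in_channels, out_channels, kernel_size,
                 relu=True, same_padding=False, bn=False):
        super(Conv2d, self).__init__()
        padding = (kernel_size - 1) // 2 if same_padding else 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              padding=padding)
        self.bn = nn.BatchNorm2d(out_channels) if bn else None
        self.relu = nn.ReLU(inplace=True) if relu else None

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x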
Example #3
def main():
    global args, optimizer_select
    # To set the model name automatically
    print(args)
    lr = args.lr
    args = get_model_name(args)
    print('Model name: {}'.format(args.model_name))

    # To set the random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed + 1)
    torch.cuda.manual_seed(args.seed + 2)

    print("Loading training set and testing set..."),
    train_set = visual_genome(args.dataset_option, 'train')
    test_set = visual_genome('small', 'test')
    print("Done.")

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=8,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=8,
                                              pin_memory=True)

    # Model declaration
    net = Hierarchical_Descriptive_Model(
        nhidden=args.mps_feature_len,
        n_object_cats=train_set.num_object_classes,
        n_predicate_cats=train_set.num_predicate_classes,
        n_vocab=train_set.voc_size,
        voc_sign=train_set.voc_sign,
        max_word_length=train_set.max_size,
        MPS_iter=args.MPS_iter,
        use_language_loss=not args.disable_language_model,
        object_loss_weight=train_set.inverse_weight_object,
        predicate_loss_weight=train_set.inverse_weight_predicate,
        dropout=args.dropout,
        use_kmeans_anchors=not args.use_normal_anchors,
        gate_width=args.gate_width,
        nhidden_caption=args.nhidden_caption,
        nembedding=args.nembedding,
        rnn_type=args.rnn_type,
        rnn_droptout=args.caption_use_dropout,
        rnn_bias=args.caption_use_bias,
        use_region_reg=args.region_bbox_reg,
        use_kernel=args.use_kernel_function)

    params = list(net.parameters())
    for param in params:
        print(param.size())
    print(net)

    # To group up the features
    vgg_features_fix, vgg_features_var, rpn_features, hdn_features, language_features = group_features(
        net)

    # Setting the state of the training model
    net.cuda()
    net.train()
    logger_path = "log/logger/{}".format(args.model_name)
    if os.path.exists(logger_path):
        shutil.rmtree(logger_path)
    configure(logger_path, flush_secs=5)  # setting up the logger

    network.set_trainable(net, False)
    #  network.weights_normal_init(net, dev=0.01)
    if args.finetune_language_model:
        print('Only finetuning the language model from: {}'.format(
            args.resume_model))
        args.train_all = False
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        optimizer_select = 3

    elif args.load_RPN:
        print('Loading pretrained RPN: {}'.format(args.saved_model_path))
        args.train_all = False
        network.load_net(args.saved_model_path, net.rpn)
        net.reinitialize_fc_layers()
        optimizer_select = 1

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        args.train_all = True
        optimizer_select = 2

    else:
        print('Training from scratch.')
        net.rpn.initialize_parameters()
        net.reinitialize_fc_layers()
        optimizer_select = 0
        args.train_all = True

    optimizer = network.get_optimizer(lr, optimizer_select, args,
                                      vgg_features_var, rpn_features,
                                      hdn_features, language_features)

    target_net = net
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    top_Ns = [50, 100]
    best_recall = np.zeros(len(top_Ns))

    if args.evaluate:
        recall = test(test_loader, net, top_Ns)
        print('======= Testing Result =======')
        for idx, top_N in enumerate(top_Ns):
            print(
                '[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'
                .format(top_N=top_N,
                        recall=recall[idx] * 100,
                        best_recall=best_recall[idx] * 100))

        print('==============================')
    else:
        for epoch in range(0, args.max_epoch):
            # Training
            train(train_loader, target_net, optimizer, epoch)
            # snapshot the state
            save_name = os.path.join(
                args.output_dir,
                '{}_epoch_{}.h5'.format(args.model_name, epoch))
            network.save_net(save_name, net)
            print('save model: {}'.format(save_name))

            # Testing
            # network.set_trainable(net, False) # Without backward(), requires_grad takes no effect

            recall = test(test_loader, net, top_Ns)

            if np.all(recall > best_recall):
                best_recall = recall
                save_name = os.path.join(args.output_dir,
                                         '{}_best.h5'.format(args.model_name))
                network.save_net(save_name, net)
                print('\nsave model: {}'.format(save_name))

            print('Epoch[{epoch:d}]:'.format(epoch=epoch), end='')
            for idx, top_N in enumerate(top_Ns):
                print(
                    '\t[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'
                    .format(top_N=top_N,
                            recall=recall[idx] * 100,
                            best_recall=best_recall[idx] * 100), end='')
            print()  # terminate the per-epoch summary line

            # updating learning policy
            if epoch % args.step_size == 0 and epoch > 0:
                lr /= 10
                args.lr = lr
                print('[learning rate: {}]'.format(lr))

                args.enable_clip_gradient = False
                if not args.finetune_language_model:
                    args.train_all = True
                    optimizer_select = 2
                # update optimizer and corresponding requires_grad state
                optimizer = network.get_optimizer(lr, optimizer_select, args,
                                                  vgg_features_var,
                                                  rpn_features, hdn_features,
                                                  language_features)
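
network.get_optimizer dispatches on optimizer_select to decide which of the grouped feature lists receive gradients at each training stage. The real mapping lives in the project's network module; a hedged sketch of one way such a selector can be written (the stage numbering below is an assumption):

import torch


def get_optimizer(lr, select, args, vgg_features_var, rpn_features,
                  hdn_features, language_features):
    # Hypothetical stages: 0 = from scratch (RPN + HDN heads),
    # 1 = HDN heads only, 2 = train everything, 3 = language model only.
    if select == 0:
        params = rpn_features + hdn_features
    elif select == 1:
        params = hdn_features
    elif select == 2:
        params = (vgg_features_var + rpn_features
                  + hdn_features + language_features)
    else:
        params = language_features

    # Freeze everything outside the selected groups so SGD only updates
    # parameters that actually require gradients.
    trainable = set(id(p) for p in params)
    for group in (vgg_features_var, rpn_features,
                  hdn_features, language_features):
        for p in group:
            p.requires_grad = id(p) in trainable

    return torch.optim.SGD(params, lr=lr, momentum=args.momentum,
                           weight_decay=0.0005)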
Example #4
def main():
    global args, optimizer_select
    # To set the model name automatically
    print(args)
    lr = args.lr
    args = get_model_name(args)
    print('Model name: {}'.format(args.model_name))

    # To set the random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed + 1)
    torch.cuda.manual_seed(args.seed + 2)

    print("Loading training set and testing set...")
    train_set = visual_genome(args.dataset_option, 'train')
    test_set = visual_genome(args.dataset_option, 'test')
    print("Done.")

    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=1,
                                               shuffle=True,
                                               num_workers=8,
                                               pin_memory=True)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=1,
                                              shuffle=True,
                                              num_workers=8,
                                              pin_memory=True)

    net = Hierarchical_Descriptive_Model(
        nhidden=args.mps_feature_len,
        n_object_cats=train_set.num_object_classes,
        n_predicate_cats=train_set.num_predicate_classes,
        MPS_iter=args.MPS_iter,
        object_loss_weight=train_set.inverse_weight_object,
        predicate_loss_weight=train_set.inverse_weight_predicate,
        dropout=args.dropout,
        use_kmeans_anchors=args.use_kmeans_anchors,
        base_model=args.base_model)

    # params = list(net.parameters())
    # for param in params:
    #     print param.size()
    print(net)

    # Setting the state of the training model
    net.cuda()
    net.train()
    network.set_trainable(net, False)
    # network.weights_normal_init(net, dev=0.01)

    if args.resume_model:
        print('Resume training from: {}'.format(args.HDN_model))
        if len(args.HDN_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.HDN_model, net)
        # network.load_net(args.RPN_model, net.rpn)
        args.train_all = True
        optimizer_select = 3

    elif args.load_RCNN:
        print('Loading pretrained RCNN: {}'.format(args.RCNN_model))
        args.train_all = False
        network.load_net(args.RCNN_model, net.rcnn)
        optimizer_select = 2

    elif args.load_RPN:
        print('Loading pretrained RPN: {}'.format(args.RPN_model))
        args.train_all = False
        network.load_net(args.RPN_model, net.rpn)
        net.reinitialize_fc_layers()
        optimizer_select = 1

    else:
        print('Training from scratch.')
        net.rpn.initialize_parameters()
        net.reinitialize_fc_layers()
        optimizer_select = 0
        args.train_all = True

    # To group up the features
    # vgg_features_fix, vgg_features_var, rpn_features, hdn_features = group_features(net)
    basenet_features, rpn_features, rcnn_feature, hdn_features = group_features(
        net)
    optimizer = network.get_optimizer(lr, optimizer_select, args,
                                      basenet_features, rpn_features,
                                      rcnn_feature, hdn_features)

    target_net = net
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    top_Ns = [50, 100]
    best_recall = np.zeros(len(top_Ns))

    if args.evaluate:
        recall = test(test_loader, target_net, top_Ns,
                      train_set.object_classes)
        print('======= Testing Result =======')
        for idx, top_N in enumerate(top_Ns):
            print(
                '[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'
                .format(top_N=top_N,
                        recall=recall[idx] * 100,
                        best_recall=best_recall[idx] * 100))

        print('==============================')
    else:
        for epoch in range(0, args.max_epoch):
            # Training
            train(train_loader, target_net, optimizer, epoch)
            # snapshot the state
            save_name = os.path.join(
                args.output_dir,
                '{}_epoch_{}.h5'.format(args.model_name, epoch))
            network.save_net(save_name, net)
            print('save model: {}'.format(save_name))

            recall = test(test_loader, target_net, top_Ns,
                          train_set.object_classes)

            if np.all(recall > best_recall):
                best_recall = recall
                save_name = os.path.join(args.output_dir,
                                         '{}_best.h5'.format(args.model_name))
                network.save_net(save_name, net)
                print('\nsave model: {}'.format(save_name))

            print('Epoch[{epoch:d}]:'.format(epoch=epoch), end='')
            for idx, top_N in enumerate(top_Ns):
                print(
                    '\t[Recall@{top_N:d}] {recall:2.3f}% (best: {best_recall:2.3f}%)'
                    .format(top_N=top_N,
                            recall=recall[idx] * 100,
                            best_recall=best_recall[idx] * 100))

            # updating learning policy
            if (epoch + 1) % args.step_size == 0 or (epoch + 1) % (
                    args.step_size + 2) == 0:
                lr /= 10
                args.lr = lr
                print('[learning rate: {}]'.format(lr))

                args.enable_clip_gradient = False
                args.train_all = False
                optimizer_select = 2
                # update optimizer and corresponding requires_grad state
                optimizer = network.get_optimizer(lr, optimizer_select, args,
                                                  basenet_features,
                                                  rpn_features, rcnn_feature,
                                                  hdn_features)
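
The learning-rate update above (divide lr by 10 on a fixed epoch schedule, then rebuild the optimizer) is a hand-rolled step decay. When the set of trainable parameters does not change, the same schedule can be expressed with PyTorch's built-in scheduler; a sketch under that assumption:

import torch

# Hypothetical equivalent of the manual `lr /= 10 every step_size epochs` policy.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=0.1)
for epoch in range(args.max_epoch):
    train(train_loader, target_net, optimizer, epoch)
    scheduler.step()  # multiplies the learning rate by gamma on schedule

The rebuild via network.get_optimizer is still required in the original code because optimizer_select and the requires_grad flags change along with the rate, which a scheduler alone cannot express.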
Example #5
def main():
    global args, optimizer_select
    # To set the model name automatically
    print(args)
    lr = args.lr
    args = get_model_name(args)
    print('Model name: {}'.format(args.model_name))

    # To set the random seed
    random.seed(args.seed)
    torch.manual_seed(args.seed + 1)
    torch.cuda.manual_seed(args.seed + 2)

    # print("Loading training set and testing set..."),
    # train_set = visual_genome(args.dataset_option, 'train')
    # test_set = visual_genome('small', 'test')

    # print("Done.")

    # train_loader = torch.utils.data.DataLoader(train_set, batch_size=1, shuffle=True, num_workers=8, pin_memory=True)
    # test_loader = torch.utils.data.DataLoader(test_set, batch_size=1, shuffle=False, num_workers=8, pin_memory=True)

    image_set = prepare_image(datapath=args.total_image_path)
    image_loader = torch.utils.data.DataLoader(image_set,
                                               batch_size=1,
                                               shuffle=False,
                                               num_workers=8,
                                               pin_memory=True)

    # Model declaration
    net = Hierarchical_Descriptive_Model(
        nhidden=args.mps_feature_len,
        n_object_cats=5,
        n_predicate_cats=5,
        n_vocab=5,
        voc_sign=5,
        max_word_length=5,
        MPS_iter=args.MPS_iter,
        use_language_loss=not args.disable_language_model,
        object_loss_weight=5,
        predicate_loss_weight=5,
        dropout=args.dropout,
        use_kmeans_anchors=not args.use_normal_anchors,
        gate_width=args.gate_width,
        nhidden_caption=args.nhidden_caption,
        nembedding=args.nembedding,
        rnn_type=args.rnn_type,
        rnn_droptout=args.caption_use_dropout,
        rnn_bias=args.caption_use_bias,
        use_region_reg=args.region_bbox_reg,
        use_kernel=args.use_kernel_function)

    params = list(net.parameters())
    for param in params:
        print(param.size())
    print(net)

    # To group up the features
    vgg_features_fix, vgg_features_var, rpn_features, hdn_features, language_features = group_features(
        net)

    # Setting the state of the training model
    net.cuda()
    net.train()
    logger_path = "log/logger/{}".format(args.model_name)
    if os.path.exists(logger_path):
        shutil.rmtree(logger_path)
    configure(logger_path, flush_secs=5)  # setting up the logger

    network.set_trainable(net, False)
    #  network.weights_normal_init(net, dev=0.01)
    if args.finetune_language_model:
        print('Only finetuning the language model from: {}'.format(
            args.resume_model))
        args.train_all = False
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        optimizer_select = 3

    elif args.load_RPN:
        print('Loading pretrained RPN: {}'.format(args.saved_model_path))
        args.train_all = False
        network.load_net(args.saved_model_path, net.rpn)
        net.reinitialize_fc_layers()
        optimizer_select = 1

    elif args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, net)
        args.train_all = True
        optimizer_select = 2

    else:
        print('Training from scratch.')
        net.rpn.initialize_parameters()
        net.reinitialize_fc_layers()
        optimizer_select = 0
        args.train_all = True

    optimizer = network.get_optimizer(lr, optimizer_select, args,
                                      vgg_features_var, rpn_features,
                                      hdn_features, language_features)

    target_net = net
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    top_Ns = [50, 100]
    best_recall = np.zeros(len(top_Ns))

    extract_features(image_loader, net)
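
extract_features itself is not shown. A minimal sketch of what such a pass typically looks like, assuming the loader yields (im_data, im_info) pairs and the network exposes a feature-producing backbone (both names are assumptions; the real interface is project-specific):

import torch


def extract_features(loader, net, out_dir='features'):
    # Run the frozen network over every image and save one feature
    # tensor per sample. `net.rpn.features` is a hypothetical handle
    # to the convolutional backbone.
    net.eval()
    with torch.no_grad():
        for i, (im_data, im_info) in enumerate(loader):
            feats = net.rpn.features(im_data.cuda())
            torch.save(feats.cpu(), '{}/{:06d}.pt'.format(out_dir, i))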
Example #6
    def __init__(self):
        # To set the model name automatically
        args = parser.parse_args()
        print(args)
        args = get_model_name(args)
        print('Model name: {}'.format(args.model_name))
        self.check = True

        # To set the random seed
        random.seed(args.seed)
        torch.manual_seed(args.seed + 1)
        torch.cuda.manual_seed(args.seed + 2)

        print("Loading training params"),
        self.train_set = visual_genome('normal', 'train')
        print("Done.")

        self.train_loader = torch.utils.data.DataLoader(self.train_set,
                                                        batch_size=1,
                                                        shuffle=True,
                                                        num_workers=8,
                                                        pin_memory=True)
        end = time.time()
        # Model declaration
        self.net = Hierarchical_Descriptive_Model(
            nhidden=args.mps_feature_len,
            n_object_cats=self.train_set.num_object_classes,
            n_predicate_cats=self.train_set.num_predicate_classes,
            n_vocab=self.train_set.voc_size,
            voc_sign=self.train_set.voc_sign,
            max_word_length=self.train_set.max_size,
            MPS_iter=args.MPS_iter,
            use_language_loss=not args.disable_language_model,
            object_loss_weight=self.train_set.inverse_weight_object,
            predicate_loss_weight=self.train_set.inverse_weight_predicate,
            dropout=args.dropout,
            use_kmeans_anchors=not args.use_normal_anchors,
            gate_width=args.gate_width,
            nhidden_caption=args.nhidden_caption,
            nembedding=args.nembedding,
            rnn_type=args.rnn_type,
            rnn_droptout=args.caption_use_dropout,
            rnn_bias=args.caption_use_bias,
            use_region_reg=args.region_bbox_reg,
            use_kernel=args.use_kernel_function)

        params = list(self.net.parameters())
        for param in params:
            print(param.size())
        print(self.net)

        # To group up the features
        vgg_features_fix, vgg_features_var, rpn_features, hdn_features, language_features = group_features(
            self.net)

        # Setting the state of the training model
        self.net.cuda()
        self.net.train()
        network.set_trainable(self.net, False)

        # loading model for inference
        print('Resume training from: {}'.format(args.resume_model))
        if len(args.resume_model) == 0:
            raise Exception('[resume_model] not specified')
        network.load_net(args.resume_model, self.net)
        args.train_all = True
        optimizer_select = 2

        optimizer = network.get_optimizer(args.lr, optimizer_select, args,
                                          vgg_features_var, rpn_features,
                                          hdn_features, language_features)

        target_net = self.net
        self.net.eval()
        print('Model Loading time: ', time.time() - end)

        # Set topics
        self.bridge = CvBridge()
        self.dot = Digraph(comment='warehouse', format='svg')
        self.regions_dot = Digraph(comment='regions', format='svg')

        self.image_sub = message_filters.Subscriber(
            '/turtlebot2i/camera/rgb/raw_image', Image)
        self.image_depth_sub = message_filters.Subscriber(
            '/turtlebot2i/camera/depth/raw_image', Image)
        self.ts = message_filters.TimeSynchronizer(
            [self.image_sub, self.image_depth_sub], queue_size=1)
        print('calling callback')
        self.ts.registerCallback(self.callback)
        self.scenegraph_pub = rospy.Publisher('/turtlebot2i/scene_graph',
                                              SceneGraph,
                                              queue_size=10)
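
message_filters.TimeSynchronizer delivers one message per subscribed topic, in registration order, so the registered callback receives the RGB and depth images together. A hedged sketch of the signature such a subscription expects (the real callback body is not shown here):

def callback(self, rgb_msg, depth_msg):
    # TimeSynchronizer passes exactly-matched message pairs; convert
    # both to OpenCV arrays before running the scene-graph model.
    rgb = self.bridge.imgmsg_to_cv2(rgb_msg, desired_encoding='bgr8')
    depth = self.bridge.imgmsg_to_cv2(depth_msg, desired_encoding='passthrough')
    # ...run self.net on the pair and publish a SceneGraph message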