Code example #1
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader = utils.get_cad_data(
        args, prediction=True)

    # Get data size and define model
    edge_features, node_features, adj_mat, node_labels, sequence_id = training_set[
        0]
    edge_feature_size, node_feature_size = edge_features.shape[
        0], node_features.shape[0]
    model_args = {
        'model_path': args.resume,
        'edge_feature_size': edge_feature_size,
        'node_feature_size': node_feature_size,
        'message_size': edge_feature_size,
        'link_hidden_size': 1024,
        'link_hidden_layers': 2,
        'propagate_layers': 3,
        'subactivity_classes': 10,
        'affordance_classes': 12
    }
    model = models.GPNN_CAD(model_args)
    del edge_features, node_features, adj_mat, node_labels
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = torch.nn.MSELoss()
    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              logger,
              args=args)
        # test on validation set
        epoch_error = validate(valid_loader,
                               model,
                               criterion,
                               logger,
                               args=args)

        epoch_errors.append(epoch_error)
        # Track the average error over 10-epoch windows. The improvement
        # check used by other variants is a no-op here; the learning rate
        # instead decays on the fixed schedule below.
        if len(epoch_errors) == 10:
            avg_epoch_error = np.mean(np.array(epoch_errors))
            epoch_errors = list()

        # Decay the learning rate every 5 epochs.
        if epoch > 0 and epoch % 5 == 0:
            args.lr *= args.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    validate(test_loader, model, criterion, logger, args=args, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
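Note: every variant in this listing receives an argparse namespace. A minimal sketch of the flags these functions actually read (flag names recovered from the attribute accesses above; defaults and values are assumptions):

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-cuda', action='store_true', default=False)  # read as args.no_cuda
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--lr-decay', type=float, default=0.8)
    parser.add_argument('--start-epoch', type=int, default=0)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--log-root', default='log')
    parser.add_argument('--resume', default='checkpoints')  # doubles as the checkpoint directory
    parser.add_argument('--eval-root', default='eval')      # V-COCO variants only
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())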
Code example #2
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, testing_set, train_loader, test_loader = utils.get_cad_data(
        args)
    features, labels, seg_lengths, total_length, activity, sequence_id = training_set[
        0]
    feature_size = features[0].shape[1]
    label_num = len(datasets.cad_metadata.subactivities)
    hidden_size = 256
    hidden_layers = 2

    model = models.BLSTM(feature_size, hidden_size, hidden_layers, label_num)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = loss_func  # loss_func is defined at module level in the original file
    if args.cuda:
        model = model.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              logger,
              args=args)
        # evaluate on the test set (this variant has no separate validation split)
        epoch_error = validate(test_loader, model, args=args)

        epoch_errors.append(epoch_error)
        # Decay the LR when the 10-epoch average error stops improving by at least 0.01.
        if len(epoch_errors) == 10:
            new_avg_epoch_error = np.mean(np.array(epoch_errors))
            if avg_epoch_error - new_avg_epoch_error < 0.01:
                args.lr *= args.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            avg_epoch_error = new_avg_epoch_error
            epoch_errors = list()

        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    validate(test_loader, model, args=args, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
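Note: loss_func is bound to criterion above but defined elsewhere in the original file. Purely as an illustration of the shape such a criterion can take for per-frame sub-activity classification, a hypothetical stand-in (the signature and the cross-entropy choice are assumptions, not the project's actual loss):

import torch.nn.functional as F

def loss_func(output, labels):
    # output: (num_frames, label_num) class scores; labels: (num_frames,) class ids.
    # Hypothetical per-frame cross-entropy; the real loss_func may differ.
    return F.cross_entropy(output, labels)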
Code example #3
File: vcoco.py (Project: tengyu-liu/Part-GPNN)
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader = utils.get_vcoco_data(
        args)

    # get training hard-negatives
    if os.path.exists('obj_action_pairs_cont.pkl'):
        with open('obj_action_pairs_cont.pkl', 'rb') as f:
            obj_action_pairs = pickle.load(f)
    else:
        obj_action_pairs = np.ones([81, 27])  # start counts at one for additive (Laplace) smoothing
        for edge_features, node_features, part_human_id, adj_mat, node_labels, node_roles, obj_boxes, part_boxes, human_boxes, img_id, img_name, human_num, part_num, obj_num, obj_classes, part_classes, part_adj_mat, _ in train_loader:
            for i in range(len(adj_mat)):
                for obj_id in range(obj_num[i]):
                    for action_id in range(
                            len(node_labels[i][human_num[i] + obj_id])):
                        obj_action_pairs[obj_classes[i][obj_id],
                                         action_id] += 1

        # Normalize counts into per-object-class action distributions.
        obj_action_pairs = obj_action_pairs / np.sum(
            obj_action_pairs, axis=-1, keepdims=True)
        with open('obj_action_pairs_cont.pkl', 'wb') as f:
            pickle.dump(obj_action_pairs, f)

    # Evaluation setup
    if not os.path.exists(args.eval_root):
        os.makedirs(args.eval_root)
    train_vcocoeval = get_vcocoeval(args, 'train')
    val_vcocoeval = get_vcocoeval(args, 'val')
    test_vcocoeval = get_vcocoeval(args, 'test')

    # Get data size and define model
    edge_features, node_features, part_human_id, adj_mat, node_labels, node_roles, obj_boxes, part_boxes, human_boxes, img_id, img_name, human_num, part_num, obj_num, obj_classes, part_classes, part_adj_mat, _ = training_set[
        0]
    edge_feature_size, node_feature_size = edge_features.shape[
        2], node_features.shape[1]
    # message_size = int(edge_feature_size/2)*2
    # message_size = edge_feature_size*2
    # message_size = 1024
    message_size = edge_feature_size

    model_args = {
        'model_path': args.resume,
        'edge_feature_size': edge_feature_size,
        'node_feature_size': node_feature_size,
        'message_size': message_size,
        'link_hidden_size': 256,
        'link_hidden_layers': args.link_layer,
        'link_relu': False,
        'update_hidden_layers': args.update_layer,
        'update_dropout': 0.0,
        'update_bias': True,
        'propagate_layers': args.prop_layer,
        'hoi_classes': action_class_num,
        'roles_num': roles_num,
        'resize_feature_to_message_size': False,
        'feature_type': args.feature_type,
        'po_type': args.po_type,
        'suppress_hh': args.suppress_hh
    }

    if args.model_type == 'V1':
        model = models.GPNN_VCOCO(model_args)
    elif args.model_type == 'V2':
        model = models.GPNN_VCOCO_v2(model_args)
    elif args.model_type == 'PG':
        model = models.GPNN_VCOCO_PG(model_args)
    else:
        # guard against an unrecognized model type leaving `model` unbound
        raise ValueError('unknown model_type: {}'.format(args.model_type))

    del edge_features, node_features, adj_mat
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    mse_loss = torch.nn.MSELoss(reduction='mean')  # size_average=True is deprecated
    multi_label_loss = torch.nn.MultiLabelSoftMarginLoss(reduction='mean')
    if args.cuda:
        model = model.cuda()
        mse_loss = mse_loss.cuda()
        multi_label_loss = multi_label_loss.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint

    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        part_obj_prior = train(args, train_loader, model, mse_loss,
                               multi_label_loss, optimizer, epoch,
                               train_vcocoeval, logger, obj_action_pairs)

        if args.debug:
            break

        # test on validation set
        epoch_error = validate(args,
                               valid_loader,
                               model,
                               mse_loss,
                               multi_label_loss,
                               val_vcocoeval,
                               logger,
                               obj_action_pairs=obj_action_pairs)

        epoch_errors.append(epoch_error)
        # Average over 2-epoch windows; the improvement check is a no-op
        # here, since the LR decays every other epoch below.
        if len(epoch_errors) == 2:
            avg_epoch_error = np.mean(np.array(epoch_errors))
            epoch_errors = list()

        if epoch % 2 == 1:
            print('Learning rate decrease')
            args.lr *= args.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    # loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    # if loaded_checkpoint:
    #     args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    # validate(args, test_loader, model, mse_loss, multi_label_loss, test_vcocoeval, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
    print(args.resume)
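Note: every variant round-trips training state through datasets.utils.save_checkpoint and datasets.utils.load_best_checkpoint, whose implementations are not shown. A sketch consistent with the call sites above (file names and the exact return tuple are assumptions):

import os
import shutil
import torch

def save_checkpoint(state, is_best, directory):
    # Always write the latest state; keep a separate copy of the best one.
    if not os.path.isdir(directory):
        os.makedirs(directory)
    latest_path = os.path.join(directory, 'checkpoint.pth')
    torch.save(state, latest_path)
    if is_best:
        shutil.copyfile(latest_path, os.path.join(directory, 'model_best.pth'))

def load_best_checkpoint(args, model, optimizer):
    # Returns None when there is nothing to resume from.
    best_path = os.path.join(args.resume, 'model_best.pth')
    if not os.path.isfile(best_path):
        return None
    checkpoint = torch.load(best_path)
    args.start_epoch = checkpoint['epoch']
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    return (args, checkpoint['best_epoch_error'],
            checkpoint['avg_epoch_error'], model, optimizer)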
Code example #4
File: hico_graph.py (Project: tengyu-liu/Part-GPNN)
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader, img_index = utils.get_hico_data(
        args)

    # Get data size and define model
    edge_features, node_features, adj_mat, node_labels, sequence_id, det_classes, det_boxes, human_num, obj_num = training_set[
        0]
    edge_feature_size, node_feature_size = edge_features.shape[
        2], node_features.shape[1]
    model = units.LinkFunction(
        'GraphConv', {
            'edge_feature_size': edge_feature_size,
            'link_hidden_size': 512,
            'link_hidden_layers': 2,
            'link_relu': False
        })
    del edge_features, node_features, adj_mat, node_labels
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # train/validate below reference `criterion` and `evaluation`, which the
    # original snippet left undefined; plausible definitions are assumed here.
    criterion = torch.nn.MultiLabelSoftMarginLoss(reduction='mean')
    evaluation = lambda output, target: torch.mean(torch.abs(output - target))  # assumed MAE metric
    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              evaluation,
              logger,
              args=args)
        # evaluate on the test set (the validation loader goes unused here)
        epoch_error = validate(test_loader,
                               model,
                               criterion,
                               evaluation,
                               logger,
                               args=args)

        epoch_errors.append(epoch_error)
        if len(epoch_errors) == 2:
            new_avg_epoch_error = np.mean(np.array(epoch_errors))
            if avg_epoch_error - new_avg_epoch_error < 0.01:
                args.lr *= args.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            avg_epoch_error = new_avg_epoch_error
            epoch_errors = list()
        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    validate(test_loader,
             model,
             criterion,
             evaluation,
             logger,
             args=args,
             test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
Code example #5
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader = utils.get_vcoco_data(args)

    # Evaluation setup
    if not os.path.exists(args.eval_root):
        os.makedirs(args.eval_root)
    train_vcocoeval = get_vcocoeval(args, 'train')
    val_vcocoeval = get_vcocoeval(args, 'val')
    test_vcocoeval = get_vcocoeval(args, 'test')

    # Get data size and define model
    edge_features, node_features, adj_mat, node_labels, node_roles, boxes, img_id, img_name, human_num, obj_num, classes = training_set[0]
    edge_feature_size, node_feature_size = edge_features.shape[2], node_features.shape[1]
    # message_size = int(edge_feature_size/2)*2
    # message_size = edge_feature_size*2
    # message_size = 1024
    message_size = edge_feature_size
    model_args = {
        'model_path': args.resume,
        'edge_feature_size': edge_feature_size,
        'node_feature_size': node_feature_size,
        'message_size': message_size,
        'link_hidden_size': 256,
        'link_hidden_layers': 3,
        'link_relu': False,
        'update_hidden_layers': 1,
        'update_dropout': False,
        'update_bias': True,
        'propagate_layers': 3,
        'hoi_classes': action_class_num,
        'roles_num': roles_num,
        'resize_feature_to_message_size': False,
        'feature_type': args.feature_type
    }
    model = models.GPNN_VCOCO(model_args)
    del edge_features, node_features, adj_mat, node_labels
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    mse_loss = torch.nn.MSELoss(reduction='mean')  # size_average=True is deprecated
    multi_label_loss = torch.nn.MultiLabelSoftMarginLoss(reduction='mean')
    if args.cuda:
        model = model.cuda()
        mse_loss = mse_loss.cuda()
        multi_label_loss = multi_label_loss.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        train(args, train_loader, model, mse_loss, multi_label_loss, optimizer, epoch, train_vcocoeval, logger)
        # test on validation set
        epoch_error = validate(args, valid_loader, model, mse_loss, multi_label_loss, val_vcocoeval, logger)

        epoch_errors.append(epoch_error)
        # Average over 2-epoch windows; the improvement check is a no-op
        # here, since the LR decays every other epoch below.
        if len(epoch_errors) == 2:
            avg_epoch_error = np.mean(np.array(epoch_errors))
            epoch_errors = list()

        if epoch % 2 == 1:
            print('Learning rate decrease')
            args.lr *= args.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr

        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    validate(args, test_loader, model, mse_loss, multi_label_loss, test_vcocoeval, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
Code example #6
File: hico.py (Project: YuxuanSnow/gpnn)
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader, img_index = utils.get_hico_data(args)  # load the data with the given batch size

    # Get data size and define model
    edge_features, node_features, adj_mat, node_labels, sequence_id, det_classes, det_boxes, human_num, obj_num = training_set[0]

    edge_feature_size, node_feature_size = edge_features.shape[2], node_features.shape[1]
    message_size = int(edge_feature_size/2)*2
    model_args = {
        'model_path': args.resume,
        'edge_feature_size': edge_feature_size,
        'node_feature_size': node_feature_size,
        'message_size': message_size,
        'link_hidden_size': 512,
        'link_hidden_layers': 2,
        'link_relu': False,
        'update_hidden_layers': 1,
        'update_dropout': False,
        'update_bias': True,
        'propagate_layers': 3,
        'hoi_classes': action_class_num,
        'resize_feature_to_message_size': False
    }
    model = models.GPNN_HICO(model_args)  # construct the GPNN model for the HICO dataset
    del edge_features, node_features, adj_mat, node_labels
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    mse_loss = torch.nn.MSELoss(reduction='mean')  # size_average=True is deprecated
    multi_label_loss = torch.nn.MultiLabelSoftMarginLoss(reduction='mean')
    if args.cuda:
        model = model.cuda()
        mse_loss = mse_loss.cuda()
        multi_label_loss = multi_label_loss.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint

    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()
        # train for one epoch
        train(train_loader, model, mse_loss, multi_label_loss, optimizer, epoch, logger)
        # test on validation set
        epoch_error = validate(valid_loader, model, mse_loss, multi_label_loss, logger)
        epoch_errors.append(epoch_error)
        if len(epoch_errors) == 2:
            new_avg_epoch_error = np.mean(np.array(epoch_errors))
            if avg_epoch_error - new_avg_epoch_error < 0.005:
                # Plateau detected; note that the actual decay happens on the
                # fixed 5-epoch schedule below, not here.
                print('Learning rate decrease')
            avg_epoch_error = new_avg_epoch_error
            epoch_errors = list()

        if epoch % 5 == 0 and epoch > 0:
            args.lr *= args.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr
        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint

    # validate(test_loader, model, mse_loss, multi_label_loss, test=True)
    gen_test_result(args, test_loader, model, mse_loss, multi_label_loss, img_index)

    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
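Note: the fixed every-5-epochs decay used here (and in code example #1) is a hand-rolled version of torch.optim.lr_scheduler.StepLR. A self-contained sketch of the equivalent built-in schedule (the toy model is a placeholder, and the first decay lands one epoch earlier than in the manual version):

import torch
from torch.optim.lr_scheduler import StepLR

model = torch.nn.Linear(4, 2)  # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = StepLR(optimizer, step_size=5, gamma=0.6)  # gamma plays the role of args.lr_decay

for epoch in range(20):
    # ... train and validate for one epoch ...
    scheduler.step()  # multiplies the LR by gamma every 5 epochs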
Code example #7
def main(args):
    np.random.seed(0)
    torch.manual_seed(0)
    start_time = time.time()
    # args.resume = None

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader = utils.get_cad_data(
        args)

    # Get data size and define model
    edge_features, node_features, adj_mat, node_labels, sequence_id = training_set[
        0]
    model = units.LinkFunction(
        'GraphConvLSTM', {
            'edge_feature_size': edge_features.shape[0],
            'link_hidden_size': 1024,
            'link_hidden_layers': 2
        })
    # model = units.LinkFunction('GraphConv', {'edge_feature_size': edge_features.shape[0], 'link_hidden_size': 1024, 'link_hidden_layers': 3})
    del edge_features, node_features, adj_mat, node_labels
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    criterion = torch.nn.MSELoss()
    # Evaluation metric: mean absolute error between output and target adjacency.
    evaluation = lambda output, target: torch.mean(torch.abs(output - target))
    if args.cuda:
        model = model.cuda()
        criterion = criterion.cuda()

    # Default error trackers; a resumed checkpoint overrides them below.
    epoch_errors = list()
    avg_epoch_error = np.inf
    best_epoch_error = np.inf

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    for epoch in range(args.start_epoch, args.epochs):
        logger.log_value('learning_rate', args.lr).step()

        # train for one epoch
        train(train_loader,
              model,
              criterion,
              optimizer,
              epoch,
              evaluation,
              logger,
              args=args)
        # test on validation set
        epoch_error = validate(valid_loader,
                               model,
                               criterion,
                               evaluation,
                               logger,
                               args=args)

        epoch_errors.append(epoch_error)
        # Decay the LR when the 15-epoch average error stops improving by at least 0.03.
        if len(epoch_errors) == 15:
            new_avg_epoch_error = np.mean(np.array(epoch_errors))
            if avg_epoch_error - new_avg_epoch_error < 0.03:
                args.lr *= args.lr_decay
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            avg_epoch_error = new_avg_epoch_error
            epoch_errors = list()

        is_best = epoch_error < best_epoch_error
        best_epoch_error = min(epoch_error, best_epoch_error)
        datasets.utils.save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_epoch_error': best_epoch_error,
                'avg_epoch_error': avg_epoch_error,
                'optimizer': optimizer.state_dict(),
            },
            is_best=is_best,
            directory=args.resume)
        print('best_epoch_error: {}, avg_epoch_error: {}'.format(
            best_epoch_error, avg_epoch_error))

    # For testing
    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    validate(test_loader, model, criterion, evaluation, logger, args=args, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
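Note: binding a lambda to a name, as with evaluation above, is discouraged by PEP 8; an equivalent named metric reads more cleanly:

import torch

def evaluation(output, target):
    # Mean absolute error between predicted and ground-truth adjacency values.
    return torch.mean(torch.abs(output - target))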
Code example #8
def main(args):
    # np.random.seed(0)
    # torch.manual_seed(0)
    start_time = time.time()

    args.cuda = not args.no_cuda and torch.cuda.is_available()
    timestamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y-%m-%d %H:%M:%S')
    logger = logutil.Logger(os.path.join(args.log_root, timestamp))

    # Load data
    training_set, valid_set, testing_set, train_loader, valid_loader, test_loader = utils.get_vcoco_data(
        args)

    # Evaluation setup
    if not os.path.exists(args.eval_root):
        os.makedirs(args.eval_root)
    train_vcocoeval = get_vcocoeval(args, 'train')
    val_vcocoeval = get_vcocoeval(args, 'val')
    test_vcocoeval = get_vcocoeval(args, 'test')

    # Get data size and define model
    edge_features, node_features, part_human_id, adj_mat, node_labels, node_roles, obj_boxes, part_boxes, human_boxes, img_id, img_name, human_num, part_num, obj_num, obj_classes, part_classes, part_adj_mat, _ = training_set[
        0]
    edge_feature_size, node_feature_size = edge_features.shape[
        2], node_features.shape[1]
    # message_size = int(edge_feature_size/2)*2
    # message_size = edge_feature_size*2
    # message_size = 1024
    message_size = edge_feature_size

    model_args = {
        'model_path': args.resume,
        'edge_feature_size': edge_feature_size,
        'node_feature_size': node_feature_size,
        'message_size': message_size,
        'link_hidden_size': 256,
        'link_hidden_layers': args.link_layer,
        'link_relu': False,
        'update_hidden_layers': args.update_layer,
        'update_dropout': 0.0,
        'update_bias': True,
        'propagate_layers': args.prop_layer,
        'hoi_classes': action_class_num,
        'roles_num': roles_num,
        'resize_feature_to_message_size': False,
        'feature_type': args.feature_type
    }

    model = models.GPNN_VCOCO(model_args)
    del edge_features, node_features, adj_mat
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    mse_loss = torch.nn.MSELoss(reduction='mean')  # size_average=True is deprecated
    multi_label_loss = torch.nn.MultiLabelSoftMarginLoss(reduction='mean')
    if args.cuda:
        model = model.cuda()
        mse_loss = mse_loss.cuda()
        multi_label_loss = multi_label_loss.cuda()

    loaded_checkpoint = datasets.utils.load_best_checkpoint(
        args, model, optimizer)
    if loaded_checkpoint:
        args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint

        # Note: all three visualize calls pass val_vcocoeval; train_vcocoeval
        # and test_vcocoeval are constructed above but go unused.
        visualize(args, train_loader, model, mse_loss, multi_label_loss,
                  val_vcocoeval, logger)
        visualize(args, valid_loader, model, mse_loss, multi_label_loss,
                  val_vcocoeval, logger)
        visualize(args, test_loader, model, mse_loss, multi_label_loss,
                  val_vcocoeval, logger)

    # For testing
    # loaded_checkpoint = datasets.utils.load_best_checkpoint(args, model, optimizer)
    # if loaded_checkpoint:
    #     args, best_epoch_error, avg_epoch_error, model, optimizer = loaded_checkpoint
    # validate(args, test_loader, model, mse_loss, multi_label_loss, test_vcocoeval, test=True)
    print('Time elapsed: {:.2f}s'.format(time.time() - start_time))
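Note: all variants seed NumPy and the CPU RNG (example #8 comments the seeding out), but none seed CUDA. A fuller reproducibility preamble, assuming GPU training (the cudnn flags trade speed for determinism):

import random
import numpy as np
import torch

def seed_everything(seed=0):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)           # no-op when no GPU is present
    torch.backends.cudnn.deterministic = True  # pick deterministic conv algorithms
    torch.backends.cudnn.benchmark = False     # disable autotuning for determinism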