Example #1
    # This net is only used to provide setup settings; it is not used for testing.

    checkpoint = torch.load(args.weights)

    print("model epoch {} loss: {}".format(checkpoint['epoch'],
                                           checkpoint['best_loss']))
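    # drop the first name component from each state-dict key
    # (e.g. the 'module.' prefix added by nn.DataParallel)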
    base_dict = {
        '.'.join(k.split('.')[1:]): v
        for k, v in list(checkpoint['state_dict'].items())
    }
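    # regression statistics stored with the checkpoint, converted to numpy for the workers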
    stats = checkpoint['reg_stats'].numpy()

    dataset = PGCNDataSet(dataset_configs,
                          graph_configs,
                          prop_file=dataset_configs['test_prop_file'],
                          prop_dict_path=dataset_configs['test_dict_path'],
                          ft_path=dataset_configs['test_ft_path'],
                          test_mode=True)

    iou_dict = dataset.act_iou_dict
    dis_dict = dataset.act_dis_dict

    index_queue = ctx.Queue()
    result_queue = ctx.Queue()
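    # spawn args.workers processes, assigned to the GPUs in gpu_list round-robin;
    # each pulls work items from index_queue and pushes its results to result_queue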
    workers = [
        ctx.Process(target=runner_func,
                    args=(dataset, base_dict, stats,
                          gpu_list[i % len(gpu_list)], index_queue,
                          result_queue, iou_dict, dis_dict))
        for i in range(args.workers)
    ]
Example #2
def main():
    global args, best_loss, writer, adj_num, logger

    configs = get_and_save_args(parser)
    parser.set_defaults(**configs)
    dataset_configs = configs["dataset_configs"]
    model_configs = configs["model_configs"]
    graph_configs = configs["graph_configs"]
    args = parser.parse_args()
    """copy codes and creat dir for saving models and logs"""
    if not os.path.isdir(args.snapshot_pref):
        os.makedirs(args.snapshot_pref)

    logger = get_logger(args)
    logger.info('\ncreating folder: ' + args.snapshot_pref)

    # if not args.evaluate:
    writer = SummaryWriter(args.snapshot_pref)
    #    recorder = Recorder(args.snapshot_pref, ["models", "__pycache__"])
    #    recorder.writeopt(args)

    logger.info('\nruntime args\n\n{}\n\nconfig\n\n{}'.format(
        args, dataset_configs))
    """construct model"""
    model = PGCN(model_configs, graph_configs)
    policies = model.get_optim_policies()
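    # wrap the model in DataParallel across the GPUs listed in args.gpus and move it to CUDA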
    model = torch.nn.DataParallel(model, device_ids=args.gpus).cuda()
    if args.resume:
        if os.path.isfile(args.resume):
            logger.info(("=> loading checkpoint '{}'".format(args.resume)))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_loss = checkpoint['best_loss']
            model.load_state_dict(checkpoint['state_dict'])
            logger.info(("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch'])))
        else:
            logger.info(("=> no checkpoint found at '{}'".format(args.resume)))
    """construct dataset"""
    train_loader = torch.utils.data.DataLoader(
        PGCNDataSet(
            dataset_configs,
            graph_configs,
            prop_file=dataset_configs['train_prop_file'],
            prop_dict_path=dataset_configs['train_dict_path'],
            ft_path=dataset_configs['train_ft_path'],
            epoch_multiplier=dataset_configs['training_epoch_multiplier'],
            test_mode=False),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True,
        drop_last=True)  # in training we drop the last incomplete minibatch

    val_loader = torch.utils.data.DataLoader(
        PGCNDataSet(
            dataset_configs,
            graph_configs,
            prop_file=dataset_configs['test_prop_file'],
            prop_dict_path=dataset_configs['val_dict_path'],
            ft_path=dataset_configs['test_ft_path'],
            epoch_multiplier=dataset_configs['testing_epoch_multiplier'],
            reg_stats=train_loader.dataset.stats,
            test_mode=False),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)
    """loss and optimizer"""
    activity_criterion = torch.nn.CrossEntropyLoss().cuda()
    completeness_criterion = CompletenessLoss().cuda()
    regression_criterion = ClassWiseRegressionLoss().cuda()

    for group in policies:
        logger.info(
            ('group: {} has {} params, lr_mult: {}, decay_mult: {}'.format(
                group['name'], len(group['params']), group['lr_mult'],
                group['decay_mult'])))

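    # SGD over the policy groups; each group carries its own lr_mult / decay_mult (logged above)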
    optimizer = torch.optim.SGD(policies,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

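    # evaluation-only run: a single validation pass, then exit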
    if args.evaluate:
        validate(val_loader, model, activity_criterion, completeness_criterion,
                 regression_criterion, 0)
        return

    for epoch in range(args.start_epoch, args.epochs):
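        # adjust the learning rate according to args.lr_steps, then train for one epoch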
        adjust_learning_rate(optimizer, epoch, args.lr_steps)
        train(train_loader, model, activity_criterion, completeness_criterion,
              regression_criterion, optimizer, epoch)

        # evaluate on validation set
        if (epoch + 1) % args.eval_freq == 0 or epoch == args.epochs - 1:
            loss = validate(val_loader, model, activity_criterion,
                            completeness_criterion, regression_criterion,
                            (epoch + 1) * len(train_loader))
            # remember best validation loss and save checkpoint
            is_best = loss < best_loss
            best_loss = min(loss, best_loss)
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_loss': loss,
                    'reg_stats': torch.from_numpy(train_loader.dataset.stats)
                },
                is_best,
                epoch,
                filename='checkpoint.pth.tar')

    writer.close()
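
    # tail of the score-merging helper merge_scores(vid) used below: rel_props are taken
    # from the first source, while the activity, completeness and regression scores
    # (indices 1-3) are fused across sources with their respective weights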
    rel_props = score_pickle_list[0][vid][0]

    return rel_props, \
           merge_part(arrays, 1, act_weights), \
           merge_part(arrays, 2, comp_weights), \
           merge_part(arrays, 3, reg_weights)


print('Merge detection scores from {} sources...'.format(
    len(score_pickle_list)))
detection_scores = {k: merge_scores(k) for k in score_pickle_list[0]}
print('Done.')

dataset = PGCNDataSet(dataset_configs,
                      graph_configs,
                      prop_file=dataset_configs['test_prop_file'],
                      prop_dict_path=dataset_configs['train_dict_path'],
                      ft_path=dataset_configs['train_ft_path'],
                      test_mode=True)

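# one dictionary per action class to collect the final detections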
dataset_detections = [dict() for i in range(num_class)]

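# optional external classifier scores; keys are normalised to the bare video name
# (basename without extension)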
if args.cls_scores:
    print('Using classifier scores from {}'.format(args.cls_scores))
    cls_score_pc = pickle.load(open(args.cls_scores, 'rb'), encoding='bytes')
    cls_score_dict = {
        os.path.splitext(os.path.basename(k.decode('utf-8')))[0]: v
        for k, v in cls_score_pc.items()
    }
else:
    cls_score_dict = None