Example #1
import argparse
import os

import torch

# cfg, synchronize, setup_logger, get_rank, get_timestamp, save_config,
# train, test and produce are project-specific helpers defined elsewhere in
# the repository this snippet is taken from.
def main():
    """Parse command-line options and the config file, then dispatch to
    training, inference, or data production."""
    parser = argparse.ArgumentParser(
        description="Graph Reasoning Machine for Visual Question Answering")
    parser.add_argument("--config-file",
                        default="configs/baseline_res101.yaml")
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--session", type=int, default=0)
    parser.add_argument("--resume", type=int, default=0)
    parser.add_argument("--batchsize", type=int, default=0)
    parser.add_argument("--inference", action='store_true')
    parser.add_argument("--produce", action='store_true')
    parser.add_argument("--instance", type=int, default=-1)
    parser.add_argument("--use_freq_prior", action='store_true')
    parser.add_argument("--visualize", action='store_true')
    parser.add_argument("--algorithm", type=str, default='sg_baseline')
    args = parser.parse_args()

    # WORLD_SIZE is exported by the distributed launcher; one process per GPU
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    # merge the YAML config file, then apply the command-line overrides below
    cfg.merge_from_file(args.config_file)
    cfg.resume = args.resume
    cfg.instance = args.instance
    cfg.inference = args.inference
    cfg.MODEL.USE_FREQ_PRIOR = args.use_freq_prior
    cfg.MODEL.ALGORITHM = args.algorithm
    if args.batchsize > 0:
        cfg.DATASET.TRAIN_BATCH_SIZE = args.batchsize
    if args.session > 0:
        cfg.MODEL.SESSION = str(args.session)
    # cfg.freeze()

    # only the main process (rank 0) creates the log directory
    if not os.path.exists("logs") and get_rank() == 0:
        os.mkdir("logs")
    logger = setup_logger("scene_graph_generation",
                          "logs",
                          get_rank(),
                          filename="{}_{}.txt".format(args.algorithm,
                                                      get_timestamp()))
    logger.info(args)
    logger.info("Loaded configuration file {}".format(args.config_file))
    output_config_path = os.path.join("logs", 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))
    save_config(cfg, output_config_path)

    if args.produce:
        produce(cfg, args)
    elif not args.inference:
        model = train(cfg, args)
    else:
        test(cfg, args)
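
A usage note: WORLD_SIZE and --local_rank are supplied by PyTorch's multi-process launcher, so a script built around this main() is normally started with one process per GPU. A hedged sketch of the invocation, assuming the entry point is saved as main.py (the file name and GPU count are assumptions; the flag values are the parser defaults above, and newer PyTorch versions use torchrun with the LOCAL_RANK environment variable instead):

python -m torch.distributed.launch --nproc_per_node=4 main.py \
    --config-file configs/baseline_res101.yaml --algorithm sg_baseline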
Example #2
    gt_triplets, gt_triplet_boxes, _ = _triplet(gt_pred_labels, gt_relations,
                                                gt_classes, gt_boxes,
                                                gt_predicate_scores,
                                                gt_class_scores)
    return gt_triplets, gt_triplet_boxes
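
The _triplet helper itself is not part of this excerpt. As a rough, non-authoritative sketch of what such a helper conventionally computes in scene-graph evaluation (the function name, argument shapes, and use of NumPy here are assumptions, not the repository's actual implementation):

import numpy as np

def _triplet_sketch(predicates, relations, classes, boxes,
                    predicate_scores=None, class_scores=None):
    # relations: (num_rel, 2) array of (subject_idx, object_idx) into classes/boxes
    sub_idx, obj_idx = relations[:, 0], relations[:, 1]
    # each triplet is (subject class, predicate label, object class)
    triplets = np.column_stack((classes[sub_idx], predicates, classes[obj_idx]))
    # each triplet box is the subject box followed by the object box: shape (num_rel, 8)
    triplet_boxes = np.hstack((boxes[sub_idx], boxes[obj_idx]))
    # optional per-triplet scores: (subject score, predicate score, object score)
    triplet_scores = None
    if predicate_scores is not None and class_scores is not None:
        triplet_scores = np.column_stack(
            (class_scores[sub_idx], predicate_scores, class_scores[obj_idx]))
    return triplets, triplet_boxes, triplet_scores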


if __name__ == '__main__':
    # load the Visual Genome index-to-name dictionaries (object classes and predicates)
    with open(os.path.join(cfg.DATASET.PATH, "VG-SGG-dicts.json"), 'r') as f:
        info = json.load(f)
    itola = info['idx_to_label']
    itopred = info['idx_to_predicate']
    meters = MetricLogger(delimiter="  ")
    data_loader = build_data_loader(cfg)
    end = time.time()
    logger = setup_logger("scene_graph_generation", "logs", get_rank())
    output_config_path = os.path.join("logs", 'config.yml')
    logger.info("Saving config into: {}".format(output_config_path))

    logger = logging.getLogger("scene_graph_generation")
    logger.info("Start training")
    max_iter = len(data_loader)
    result_dic = defaultdict(int)  # maps names (str) to integer counts

    all_images = 0
    with open('browse_data.txt', 'w') as f:
        for i, data in enumerate(data_loader):
            data_time = time.time() - end

            imgs, target, idx = data
            all_images += len(imgs.tensors)
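
The loop is truncated in this excerpt. For reference, the idx_to_label and idx_to_predicate dictionaries loaded above map integer annotation ids back to readable class and predicate names; a minimal sketch of how they might be used while browsing the data (decode_triplet is an illustrative helper, not part of the original script; JSON object keys are strings, hence the str() conversion):

def decode_triplet(sub_id, pred_id, obj_id, itola, itopred):
    # look up human-readable names for a (subject, predicate, object) id triple
    return itola[str(sub_id)], itopred[str(pred_id)], itola[str(obj_id)]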