def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
    dataloader = dataset.make_dataloader(batch_size=32,
                                         shuffle=False,
                                         drop_last=False,
                                         nr_workers=4)
    meters = GroupMeters()

    for idx, feed_dict in tqdm_gofor(dataloader):
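        # Each feed_dict carries the program sequences, scene annotations, and
        # ground-truth answers for one batch of questions.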
        feed_dict = GView(feed_dict)

        for i, (p, s, gt) in enumerate(
                zip(feed_dict.program_seq, feed_dict.scene, feed_dict.answer)):
            _, pred = execute_program(p, s)
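            # `pred` is a (status, value) pair: on failure the status is
            # 'error' and the value carries the exception; otherwise the value
            # is the predicted answer.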

            if pred[0] == 'error':
                raise pred[1]

            if pred[1] != gt:
                # The prediction disagrees with the ground-truth answer: print
                # the offending program and scene, drop into an interactive
                # shell for inspection, then abort.
                print(p)
                print(s)

                from IPython import embed
                embed()
                from sys import exit
                exit()

            meters.update('accuracy', pred[1] == gt)
        get_current_tqdm().set_description(
            meters.format_simple('Exec:', 'val', compressed=True))

    logger.critical(
        meters.format_simple('Symbolic execution test:',
                             'avg',
                             compressed=False))
def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)

    if args.nr_vis is None:
        args.nr_vis = min(100, len(dataset))

    if args.random:
        indices = random.choice(len(dataset), size=args.nr_vis, replace=False)
    else:
        indices = list(range(args.nr_vis))

    vis = HTMLTableVisualizer(args.data_vis_dir,
                              'Dataset: ' + args.dataset.upper())
    vis.begin_html()
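    # The first table records every command-line argument for reproducibility.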
    with vis.table('Metainfo', [
            HTMLTableColumnDesc('k', 'Key', 'text', {}, None),
            HTMLTableColumnDesc('v', 'Value', 'code', {}, None)
    ]):
        for k, v in args.__dict__.items():
            vis.row(k=k, v=v)

    with vis.table('Visualize', [
            HTMLTableColumnDesc('id', 'QuestionID', 'text', {}, None),
            HTMLTableColumnDesc('image', 'Image', 'figure', {'width': '100%'},
                                None),
            HTMLTableColumnDesc(
                'qa', 'QA', 'text', css=None, td_css={'width': '30%'}),
            HTMLTableColumnDesc(
                'p', 'Program', 'code', css=None, td_css={'width': '30%'})
    ]):
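        # For each sampled question, render the image with its object bounding
        # boxes (when annotations are available) next to the question/answer
        # pair and the symbolic program.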
        for i in tqdm(indices):
            feed_dict = GView(dataset[i])
            image_filename = osp.join(args.data_image_root,
                                      feed_dict.image_filename)
            image = Image.open(image_filename)

            if 'objects' in feed_dict:
                fig, ax = vis_bboxes(image,
                                     feed_dict.objects,
                                     'object',
                                     add_text=False)
            else:
                fig, ax = vis_bboxes(image, [], 'object', add_text=False)
            _ = ax.set_title('object bounding box annotations')

            QA_string = """
                <p><b>Q</b>: {}</p>
                <p><b>A</b>: {}</p>
            """.format(feed_dict.question_raw, feed_dict.answer)
            P_string = '\n'.join([repr(x) for x in feed_dict.program_seq])

            vis.row(id=i, image=fig, qa=QA_string, p=P_string)
            plt.close()
    vis.end_html()

    logger.info(
        'Happy Holiday! You can find your result at "http://monday.csail.mit.edu/xiuming'
        + osp.realpath(args.data_vis_dir) + '".')
def main():
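    # Compose a descriptive dump directory name from the training target,
    # curriculum setting, optional question transform, optional experiment
    # tag, and learning rate.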
    args.dump_dir = ensure_path(
        osp.join('dumps', args.series_name, args.desc_name,
                 (args.training_target + ('-curriculum_' + args.curriculum) +
                  ('-qtrans_' + args.question_transform
                   if args.question_transform is not None else '') +
                  ('-' + args.expr if args.expr is not None else '') +
                  ('-lr_' + str(args.lr)))))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
        args.meta_file = osp.join(args.meta_dir, args.run_name + '.json')
        args.log_file = osp.join(args.meta_dir, args.run_name + '.log')
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + '.meter.json')

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Set up the TensorBoard log directories.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, 'tensorboard'))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(args, configs, args.data_image_root,
                            args.data_scenes_json, args.data_questions_json)

    # data_trim / data_split are interpreted as a fraction of the dataset when
    # <= 1, and as an absolute number of samples otherwise.
    dataset_trim = (int(len(dataset) * args.data_trim)
                    if args.data_trim <= 1 else int(args.data_trim))
    if dataset_trim > 0:
        dataset = dataset.trim_length(dataset_trim)

    dataset_split = (int(len(dataset) * args.data_split)
                     if args.data_split <= 1 else int(args.data_split))
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    extra_dataset = None
    if args.extra_data_dir is not None:
        extra_dataset = build_dataset(args, configs,
                                      args.extra_data_image_root,
                                      args.extra_data_scenes_json,
                                      args.extra_data_questions_json)

    main_train(train_dataset, validation_dataset, extra_dataset)
def get_data(batch_size=1, dataset_size=500):
    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    # Use the validation set (built from the extra-data paths).
    dataset = build_dataset(args, configs, args.extra_data_image_root,
                            args.extra_data_scenes_json,
                            args.extra_data_questions_json)

    if dataset_size is not None:
        dataset = dataset.trim_length(dataset_size)

    dataloader = dataset.make_dataloader(batch_size=batch_size,
                                         shuffle=False,
                                         drop_last=False,
                                         nr_workers=1)
    train_iter = iter(dataloader)
    return train_iter, dataset_size
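
# A minimal usage sketch for get_data above (hypothetical; assumes `args` and
# `configs` are already populated by the surrounding script):
#
#   data_iter, size = get_data(batch_size=4, dataset_size=200)
#   feed_dict = next(data_iter)  # one batch of samples
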
def main():
    args.dump_dir = ensure_path(osp.join(
        'dumps', args.series_name, args.desc_name))
    if args.normalized_boxes:
        args.dump_dir = args.dump_dir + '_norm_box'
    if args.even_smp_flag:
        args.dump_dir = (args.dump_dir + '_even_smp' + str(args.frm_img_num) +
                         '_col_box_ftr')
    args.dump_dir += '_' + args.version + '_' + args.prefix

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
        args.meta_file = osp.join(args.meta_dir, args.run_name + '.json')
        args.log_file = osp.join(args.meta_dir, args.run_name + '.log')
        args.meter_file = osp.join(args.meta_dir, args.run_name + '.meter.json')

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Set up the TensorBoard log directories.
        if args.use_tb:
            args.tb_dir_root = ensure_path(osp.join(args.dump_dir, 'tensorboard'))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root, args.run_name))

    initialize_dataset(args.dataset, args.version)
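    # Evaluate on the test split when testing_flag is set or when running the
    # billiards dataset; otherwise use the validation split.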
    if args.testing_flag == 1 or args.dataset == 'billiards':
        validation_dataset = build_clevrer_dataset(args, 'test')
    else:
        validation_dataset = build_clevrer_dataset(args, 'validation')
    train_dataset = build_clevrer_dataset(args, 'train')

    extra_dataset = None
    main_train(train_dataset, validation_dataset, extra_dataset)
def main():
    initialize_dataset(args.dataset)
    build_symbolic_dataset = get_symbolic_dataset_builder(args.dataset)
    dataset = build_symbolic_dataset(args)
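    # Serialize the dataset vocabulary to a JSON file at args.output.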
    dataset.unwrapped.vocab.dump_json(args.output)
    logger.critical('Vocab json dumped at: "{}".'.format(args.output))
def main():
    args.dump_dir = ensure_path(
        osp.join(
            "dumps",
            args.series_name,
            args.desc_name,
            (args.training_target + ("-curriculum_" + args.curriculum) +
             ("-qtrans_" + args.question_transform
              if args.question_transform is not None else "") +
             ("-" + args.expr if args.expr is not None else "")),
        ))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, "checkpoints"))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, "meta"))
        args.meta_file = osp.join(args.meta_dir, args.run_name + ".json")
        args.log_file = osp.join(args.meta_dir, args.run_name + ".log")
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + ".meter.json")

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, "w") as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Set up the TensorBoard log directories.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, "tensorboard"))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))
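
    # Optionally load per-split image-index whitelists; the matching
    # .filter(...) calls further below are currently commented out, so these
    # index sets are read but not applied.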

    if args.train_split is not None:
        with open(osp.join(args.data_dir, args.train_split)) as f:
            train_idxs = set(json.load(f))
    else:
        train_idxs = None
    if args.val_split is not None and args.val_data_dir is not None:
        with open(osp.join(args.val_data_dir, args.val_split)) as f:
            val_idxs = set(json.load(f))
    else:
        val_idxs = None
    if args.test_split is not None and args.test_data_dir is not None:
        with open(osp.join(args.test_data_dir, args.test_split)) as f:
            test_idxs = set(json.load(f))
    else:
        test_idxs = None

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(
        args,
        configs,
        args.data_image_root,
        args.data_depth_root,
        args.data_scenes_json,
        args.data_questions_json,
    )

    # dataset_trim = (
    #     int(len(dataset) * args.data_trim)
    #     if args.data_trim <= 1
    #     else int(args.data_trim)
    # )
    # if dataset_trim > 0:
    #     dataset = dataset.trim_length(dataset_trim)

    # # dataset_split = (
    # #     int(len(dataset) * args.data_split)
    # #     if args.data_split <= 1
    # #     else int(args.data_split)
    # # )
    # # train_dataset, validation_dataset = dataset.split_trainval(dataset_split)
    # if args.mv:
    #     ood_views = set(args.ood_views)
    #     id_views = set(range(args.num_views)) - ood_views
    train_dataset = dataset
    # if train_idxs:
    #     train_dataset = dataset.filter(
    #         lambda question: question["image_index"] in train_idxs,
    #         "filter_train_size_{}".format(len(train_idxs)),
    #     )
    val_dataset = None
    if args.val_data_dir is not None:
        val_dataset = build_dataset(
            args,
            configs,
            args.val_data_image_root,
            args.val_data_depth_root,
            args.val_data_scenes_json,
            args.val_data_questions_json,
        )
    #     if val_idxs:
    #         val_dataset = val_dataset.filter(
    #             lambda question: question["image_index"] in val_idxs,
    #             "filter_val_size_{}".format(len(val_idxs)),
    #         )
    test_dataset = None
    if args.test_data_dir is not None:
        test_dataset = build_dataset(
            args,
            configs,
            args.test_data_image_root,
            args.test_data_depth_root,
            args.test_data_scenes_json,
            args.test_data_questions_json,
        )
    #     if test_idxs:
    #         test_dataset = test_dataset.filter(
    #             lambda question: question["image_index"] in test_idxs,
    #             "filter_val_size_{}".format(len(test_idxs)),
    #         )
    #     test_dataset = {"test": test_dataset}
    # if args.mv:
    #     # train_dataset = train_dataset.filter(
    #     #     lambda question: question["view_id"] in id_views, "id_view"
    #     # )
    #     if val_dataset:
    #         val_dataset = val_dataset.filter(
    #             lambda question: question["view_id"] in id_views, "id_view"
    #         )
    #     if test_dataset:
    #         id_test = test_dataset["test"].filter(
    #             lambda question: question["view_id"] in id_views, "id_view"
    #         )
    #         ood_test = test_dataset["test"].filter(
    #             lambda question: question["view_id"] in ood_views, "ood_view"
    #         )
    #         test_dataset = {"id_test": id_test, "ood_test": ood_test}

    prototype_dataset = create_prototype_dataset(
        "/projects/data/clevr_nscl/one_shot_protos")
    one_shot_root = "/projects/data/clevr_nscl/one_shot_test_only"
    one_shot_dataset = build_dataset(
        args,
        configs,
        one_shot_root + "/images",
        one_shot_root + "/depth",
        one_shot_root + "/CLEVR_scenes_annotated_aligned.json",
        one_shot_root + "/CLEVR_questions.json",
    )
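    # Hand everything to main_train: the (unfiltered) training set plus the
    # validation, test, prototype, and one-shot sets built above.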
    main_train(train_dataset, val_dataset, test_dataset, prototype_dataset,
               one_shot_dataset)
def main():
    args.dump_dir = ensure_path(
        osp.join(
            "dumps",
            args.series_name,
            args.desc_name,
            (args.training_target + ("-curriculum_" + args.curriculum) +
             ("-qtrans_" + args.question_transform
              if args.question_transform is not None else "") +
             ("-" + args.expr if args.expr is not None else "")),
        ))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, "checkpoints"))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, "meta"))
        args.meta_file = osp.join(args.meta_dir, args.run_name + ".json")
        args.log_file = osp.join(args.meta_dir, args.run_name + ".log")
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + ".meter.json")

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, "w") as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Set up the TensorBoard log directories.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, "tensorboard"))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(
        args,
        configs,
        args.data_image_root,
        args.data_scenes_json,
        args.data_questions_json,
    )

    dataset_trim = (int(len(dataset) * args.data_trim)
                    if args.data_trim <= 1 else int(args.data_trim))
    if dataset_trim > 0:
        dataset = dataset.trim_length(dataset_trim)

    dataset_split = (int(len(dataset) * args.data_split)
                     if args.data_split <= 1 else int(args.data_split))
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    extra_dataset = None
    if args.extra_data_dir is not None:
        extra_dataset = build_dataset(
            args,
            configs,
            args.extra_data_image_root,
            args.extra_data_scenes_json,
            args.extra_data_questions_json,
        )

    main_train(train_dataset, validation_dataset, extra_dataset)