Example #1
def main():
    args.dump_dir = ensure_path(
        osp.join('dumps', args.series_name, args.desc_name,
                 (args.training_target + ('-curriculum_' + args.curriculum) +
                  ('-qtrans_' + args.question_transform
                   if args.question_transform is not None else '') +
                  ('-' + args.expr if args.expr is not None else '') +
                  ('-lr_' + str(args.lr)))))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, 'checkpoints'))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, 'meta'))
        args.meta_file = osp.join(args.meta_dir, args.run_name + '.json')
        args.log_file = osp.join(args.meta_dir, args.run_name + '.log')
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + '.meter.json')

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, 'w') as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Initialize the tensorboard.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, 'tensorboard'))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(args, configs, args.data_image_root,
                            args.data_scenes_json, args.data_questions_json)

    # data_trim <= 1 is treated as a fraction of the dataset; larger values as an absolute count.
    dataset_trim = (int(len(dataset) * args.data_trim)
                    if args.data_trim <= 1 else int(args.data_trim))
    if dataset_trim > 0:
        dataset = dataset.trim_length(dataset_trim)

    # The same fraction-or-count convention determines the train/validation split point.
    dataset_split = (int(len(dataset) * args.data_split)
                     if args.data_split <= 1 else int(args.data_split))
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    extra_dataset = None
    if args.extra_data_dir is not None:
        extra_dataset = build_dataset(args, configs,
                                      args.extra_data_image_root,
                                      args.extra_data_scenes_json,
                                      args.extra_data_questions_json)

    main_train(train_dataset, validation_dataset, extra_dataset)
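
The dump directory name above is composed from the training target, the curriculum, the optional question transform, the optional experiment tag, and the learning rate. A minimal sketch of the resulting path, with made-up argument values (the series/desc names are illustrative, not from the original source):

import os.path as osp

# Illustrative values only; in the example they come from the parsed command-line args.
training_target, curriculum, expr, lr = "derender", "all", "baseline", 0.001
name = (training_target + "-curriculum_" + curriculum +
        "-" + expr + "-lr_" + str(lr))
print(osp.join("dumps", "clevr", "nscl_basic", name))
# dumps/clevr/nscl_basic/derender-curriculum_all-baseline-lr_0.001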

Example #2
def get_data(batch_size=1, dataset_size=500):
    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    # Use the validation set (built here from the extra_data_* paths).
    dataset = build_dataset(args, configs, args.extra_data_image_root,
                            args.extra_data_scenes_json,
                            args.extra_data_questions_json)

    if dataset_size is not None:
        dataset = dataset.trim_length(dataset_size)

    dataloader = dataset.make_dataloader(batch_size=batch_size,
                                         shuffle=False,
                                         drop_last=False,
                                         nr_workers=1)
    train_iter = iter(dataloader)
    return train_iter, dataset_size
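
The iterator returned by get_data can then be drained batch by batch. A minimal usage sketch, assuming args and configs are already populated exactly as in the example above:

# Pull a few batches from the un-shuffled validation dataloader.
val_iter, size = get_data(batch_size=4, dataset_size=100)
for _ in range(size // 4):
    feed_dict = next(val_iter)
    # inspect the feed dict or run the model on it here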
Example #3
def main():
    args.dump_dir = ensure_path(
        osp.join(
            "dumps",
            args.series_name,
            args.desc_name,
            (args.training_target + ("-curriculum_" + args.curriculum) +
             ("-qtrans_" + args.question_transform
              if args.question_transform is not None else "") +
             ("-" + args.expr if args.expr is not None else "")),
        ))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, "checkpoints"))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, "meta"))
        args.meta_file = osp.join(args.meta_dir, args.run_name + ".json")
        args.log_file = osp.join(args.meta_dir, args.run_name + ".log")
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + ".meter.json")

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, "w") as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Initialize the tensorboard.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, "tensorboard"))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))

    if args.train_split is not None:
        with open(osp.join(args.data_dir, args.train_split)) as f:
            train_idxs = set(json.load(f))
    else:
        train_idxs = None
    if args.val_split is not None and args.val_data_dir is not None:
        with open(osp.join(args.val_data_dir, args.val_split)) as f:
            val_idxs = set(json.load(f))
    else:
        val_idxs = None
    if args.test_split is not None and args.test_data_dir is not None:
        with open(osp.join(args.test_data_dir, args.test_split)) as f:
            test_idxs = set(json.load(f))
    else:
        test_idxs = None

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(
        args,
        configs,
        args.data_image_root,
        args.data_depth_root,
        args.data_scenes_json,
        args.data_questions_json,
    )

    # dataset_trim = (
    #     int(len(dataset) * args.data_trim)
    #     if args.data_trim <= 1
    #     else int(args.data_trim)
    # )
    # if dataset_trim > 0:
    #     dataset = dataset.trim_length(dataset_trim)

    # # dataset_split = (
    # #     int(len(dataset) * args.data_split)
    # #     if args.data_split <= 1
    # #     else int(args.data_split)
    # # )
    # # train_dataset, validation_dataset = dataset.split_trainval(dataset_split)
    # if args.mv:
    #     ood_views = set(args.ood_views)
    #     id_views = set(range(args.num_views)) - ood_views
    train_dataset = dataset
    # if train_idxs:
    #     train_dataset = dataset.filter(
    #         lambda question: question["image_index"] in train_idxs,
    #         "filter_train_size_{}".format(len(train_idxs)),
    #     )
    val_dataset = None
    if args.val_data_dir is not None:
        val_dataset = build_dataset(
            args,
            configs,
            args.val_data_image_root,
            args.val_data_depth_root,
            args.val_data_scenes_json,
            args.val_data_questions_json,
        )
    #     if val_idxs:
    #         val_dataset = val_dataset.filter(
    #             lambda question: question["image_index"] in val_idxs,
    #             "filter_val_size_{}".format(len(val_idxs)),
    #         )
    test_dataset = None
    if args.test_data_dir is not None:
        test_dataset = build_dataset(
            args,
            configs,
            args.test_data_image_root,
            args.test_data_depth_root,
            args.test_data_scenes_json,
            args.test_data_questions_json,
        )
    #     if test_idxs:
    #         test_dataset = test_dataset.filter(
    #             lambda question: question["image_index"] in test_idxs,
    #             "filter_val_size_{}".format(len(test_idxs)),
    #         )
    #     test_dataset = {"test": test_dataset}
    # if args.mv:
    #     # train_dataset = train_dataset.filter(
    #     #     lambda question: question["view_id"] in id_views, "id_view"
    #     # )
    #     if val_dataset:
    #         val_dataset = val_dataset.filter(
    #             lambda question: question["view_id"] in id_views, "id_view"
    #         )
    #     if test_dataset:
    #         id_test = test_dataset["test"].filter(
    #             lambda question: question["view_id"] in id_views, "id_view"
    #         )
    #         ood_test = test_dataset["test"].filter(
    #             lambda question: question["view_id"] in ood_views, "ood_view"
    #         )
    #         test_dataset = {"id_test": id_test, "ood_test": ood_test}

    prototype_dataset = create_prototype_dataset(
        "/projects/data/clevr_nscl/one_shot_protos")
    one_shot_root = "/projects/data/clevr_nscl/one_shot_test_only"
    one_shot_dataset = build_dataset(
        args,
        configs,
        one_shot_root + "/images",
        one_shot_root + "/depth",
        one_shot_root + "/CLEVR_scenes_annotated_aligned.json",
        one_shot_root + "/CLEVR_questions.json",
    )
    main_train(train_dataset, val_dataset, test_dataset, prototype_dataset,
               one_shot_dataset)
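
The train/val/test split files loaded at the top of this example are plain JSON lists of image indices; the commented-out blocks show how they would drive dataset.filter. A small self-contained sketch of that convention (the file name and predicate are illustrative):

import json

# Write an illustrative split file: a JSON list of image indices.
with open("train_split.json", "w") as f:
    json.dump([0, 1, 2, 5, 8], f)

# Load it back into a set, as main() does above.
with open("train_split.json") as f:
    train_idxs = set(json.load(f))

# The commented-out filtering keeps only questions whose image_index is in the split.
keep = lambda question: question["image_index"] in train_idxs
assert keep({"image_index": 2}) and not keep({"image_index": 3})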
Example #4
def main():
    args.dump_dir = ensure_path(
        osp.join(
            "dumps",
            args.series_name,
            args.desc_name,
            (args.training_target + ("-curriculum_" + args.curriculum) +
             ("-qtrans_" + args.question_transform
              if args.question_transform is not None else "") +
             ("-" + args.expr if args.expr is not None else "")),
        ))

    if not args.debug:
        args.ckpt_dir = ensure_path(osp.join(args.dump_dir, "checkpoints"))
        args.meta_dir = ensure_path(osp.join(args.dump_dir, "meta"))
        args.meta_file = osp.join(args.meta_dir, args.run_name + ".json")
        args.log_file = osp.join(args.meta_dir, args.run_name + ".log")
        args.meter_file = osp.join(args.meta_dir,
                                   args.run_name + ".meter.json")

        logger.critical('Writing logs to file: "{}".'.format(args.log_file))
        set_output_file(args.log_file)

        logger.critical('Writing metainfo to file: "{}".'.format(
            args.meta_file))
        with open(args.meta_file, "w") as f:
            f.write(dump_metainfo(args=args.__dict__, configs=configs))

        # Initialize the tensorboard.
        if args.use_tb:
            args.tb_dir_root = ensure_path(
                osp.join(args.dump_dir, "tensorboard"))
            args.tb_dir = ensure_path(osp.join(args.tb_dir_root,
                                               args.run_name))

    initialize_dataset(args.dataset)
    build_dataset = get_dataset_builder(args.dataset)

    dataset = build_dataset(
        args,
        configs,
        args.data_image_root,
        args.data_scenes_json,
        args.data_questions_json,
    )

    dataset_trim = (int(len(dataset) * args.data_trim)
                    if args.data_trim <= 1 else int(args.data_trim))
    if dataset_trim > 0:
        dataset = dataset.trim_length(dataset_trim)

    dataset_split = (int(len(dataset) * args.data_split)
                     if args.data_split <= 1 else int(args.data_split))
    # from IPython import embed

    # embed()
    train_dataset, validation_dataset = dataset.split_trainval(dataset_split)

    extra_dataset = None
    if args.extra_data_dir is not None:
        extra_dataset = build_dataset(
            args,
            configs,
            args.extra_data_image_root,
            args.extra_data_scenes_json,
            args.extra_data_questions_json,
        )

    main_train(train_dataset, validation_dataset, extra_dataset)
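
Examples #1 and #4 treat data_trim and data_split as either a fraction of the dataset (values <= 1) or an absolute item count. A standalone illustration of that convention (resolve_size is a hypothetical helper, not part of the original code):

def resolve_size(value, total):
    # Values <= 1 are read as a fraction of the dataset; larger values as an absolute count.
    return int(total * value) if value <= 1 else int(value)

assert resolve_size(0.9, 70000) == 63000   # fractional: 90% of the data
assert resolve_size(5000, 70000) == 5000   # absolute: exactly 5000 items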