Example #1
def main():
    # log, Evaler, check_data_path, and the sort_of_clevr dataset module are
    # provided by the surrounding project at module level; os is needed for
    # the path handling below.
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=50)
    parser.add_argument('--topk', type=int, default=128)
    parser.add_argument('--model',
                        type=str,
                        default='conv',
                        # 'conv' added to choices: the original default was not
                        # among them, so an explicit --model conv was rejected
                        choices=['rn', 'baseline', 'conv'])
    parser.add_argument('--checkpoint_path', type=str)
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--data_id', nargs='*', default=None)
    parser.add_argument('--name', type=str, default="some_model")
    config = parser.parse_args()

    path = os.path.join('./datasets', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_test = dataset.create_default_splits(path)

    evaler = Evaler(config, dataset_test)

    log.warning("dataset: %s", config.dataset_path)
    evaler.eval_run()
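All four examples gate the dataset import on a check_data_path helper that the
surrounding project provides but this listing does not show. A minimal sketch,
assuming it merely verifies that the generated dataset files are present (the
data.hy and id.txt filenames are assumptions based on common Sort-of-CLEVR
generator conventions, not taken from this listing):

import os

def check_data_path(path):
    # Hypothetical: report the dataset as usable when the directory holds the
    # generated hdf5 file and the id list; the real helper may differ.
    return (os.path.exists(os.path.join(path, 'data.hy'))
            and os.path.exists(os.path.join(path, 'id.txt')))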
Example #2
def main():
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--model',
                        type=str,
                        default='baseline',
                        choices=['rn', 'baseline'])
    parser.add_argument('--prefix', type=str, default='default')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--learning_rate', type=float, default=2.5e-4)
    parser.add_argument('--lr_weight_decay',
                        action='store_true')  # store_true already defaults to False
    config = parser.parse_args()

    path = os.path.join('./datasets', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_test = dataset.create_default_splits(path)

    trainer = Trainer(config, dataset_train, dataset_test)

    log.warning("dataset: %s, learning_rate: %f", config.dataset_path,
                config.learning_rate)
    trainer.train()
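The --learning_rate and --lr_weight_decay flags only take effect inside
Trainer, which is not shown here. A minimal sketch of how a trainer might
consume them, assuming a TensorFlow 1.x setup; the decay schedule and the
build_learning_rate name are illustrative, not taken from the original:

import tensorflow as tf

def build_learning_rate(config, global_step):
    # Start from the constant rate given on the command line...
    learning_rate = config.learning_rate
    if config.lr_weight_decay:
        # ...and optionally decay it over training steps. Halving every
        # 10,000 steps is a placeholder schedule, not the project's choice.
        learning_rate = tf.train.exponential_decay(
            config.learning_rate, global_step,
            decay_steps=10000, decay_rate=0.5, staircase=True)
    return learning_rate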
Example #3
def main():
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--model', type=str, default='rn', choices=['rn', 'baseline'])
    parser.add_argument('--checkpoint_path', type=str)
    parser.add_argument('--location', action='store_true')
    parser.add_argument('--visualize', action='store_true')
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset_path', type=str, default='')
    parser.add_argument('--data_id', nargs='*', default=None)
    config = parser.parse_args()

    path = os.path.join('../DatasetCreation/VG', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_test = dataset.create_default_splits(path)

    evaler = Evaler(config, dataset_test)

    log.warning("dataset: %s", config.dataset_path)
    evaler.eval_run()
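The --data_id option uses nargs='*', so it collects zero or more values into a
list and stays None when the flag is omitted entirely. A self-contained
demonstration of that argparse behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data_id', nargs='*', default=None)

print(parser.parse_args([]).data_id)                       # None: flag omitted
print(parser.parse_args(['--data_id']).data_id)            # []: flag given without values
print(parser.parse_args(['--data_id', '3', '7']).data_id)  # ['3', '7']: strings, since no type= is set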
Example #4
def main():
    import argparse
    import os
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--model',
                        type=str,
                        default='ilp',
                        # 'ilp' added to choices for the same reason as in Example #1
                        choices=['rn', 'baseline', 'ilp'])
    parser.add_argument(
        '--checkpoint_path',
        type=str,
        default=('./train_dir/ilp-default-Sort-of-CLEVR_default'
                 '_lr_0.002-20190807-173045/model-80000'))
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset_path',
                        type=str,
                        default='Sort-of-CLEVR_default')
    parser.add_argument('--data_id', nargs='*', default=None)
    config = parser.parse_args()

    path = os.path.join('./datasets', config.dataset_path)

    if check_data_path(path):
        import sort_of_clevr as dataset
    else:
        raise ValueError(path)

    config.data_info = dataset.get_data_info()
    config.conv_info = dataset.get_conv_info()
    dataset_train, dataset_test = dataset.create_default_splits(path)

    evaler = Evaler(config, dataset_test)

    # Optional inspection snippet (commented out in the original): decode the
    # one-hot question and answer encodings for each test id. Note it iterates
    # dataset_test._ids but fetches via dataset_train, which may be a slip.
    # qs1, qs2, ans = [], [], []
    # for id in dataset_test._ids:
    #     dt = dataset_train.get_data(id)
    #     qs1.append(np.argmax(dt[1][:6]))
    #     qs2.append(np.argmax(dt[1][6:]))
    #     ans.append(np.argmax(dt[2]))

    log.warning("dataset: %s", config.dataset_path)
    evaler.eval_run()
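Examples #1 and #4 originally shipped a --model default ('conv' and 'ilp')
that was missing from choices; the listings above add the defaults to the
choices lists. The original code still parsed, because argparse only checks
choices for values passed on the command line, never for defaults, so the
mismatch surfaced only when someone passed the default explicitly:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='ilp', choices=['rn', 'baseline'])

print(parser.parse_args([]).model)     # 'ilp': the default bypasses the choices check
parser.parse_args(['--model', 'ilp'])  # exits: "invalid choice: 'ilp'" -- CLI values are checked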