def get_opt():
    """Build the CLI parser for the GMM stage and return the parsed options."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # experiment identity / hardware
    parser.add_argument("--name", default="GMM")
    parser.add_argument("--gpu_ids", default="")
    parser.add_argument('-j', '--workers', type=int, default=1)
    parser.add_argument('-b', '--batch-size', type=int, default=4)

    # dataset locations and selection
    parser.add_argument("--dataroot", default="data")
    parser.add_argument("--vvt_dataroot", default="/data_hdd/vvt_competition")
    parser.add_argument("--mpv_dataroot", default="/data_hdd/mpv_competition")
    parser.add_argument("--datamode", default="train")
    parser.add_argument("--dataset", choices=DATASETS.keys(), default="cp")
    parser.add_argument("--stage", default="GMM")
    parser.add_argument("--data_list", default="train_pairs.txt")

    # input geometry
    parser.add_argument("--fine_width", type=int, default=192)
    parser.add_argument("--fine_height", type=int, default=256)
    parser.add_argument("--radius", type=int, default=5)
    parser.add_argument("--grid_size", type=int, default=5)

    # logging / output
    parser.add_argument('--tensorboard_dir', type=str, help='save tensorboard infos')
    parser.add_argument('--result_dir', type=str, default='result', help='save result infos')
    parser.add_argument('--checkpoint', type=str, default='', help='model checkpoint for test')
    parser.add_argument("--display_count", type=int, default=1)
    parser.add_argument("--shuffle", action='store_true', help='shuffle input data')

    return parser.parse_args()
# Example 2
def parse_args():
    """Parse the command-line arguments for model inference.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Model training')

    # model selection
    parser.add_argument(
        '--model_name', type=str, default='FarNet',
        help='Model type for testing, which is one of {}'.format(str(list(MODELS.keys()))))

    # inference dataset
    parser.add_argument(
        '--dataset', type=str, default='ImageFolder',
        help="The dataset you want to test, which is one of {}".format(str(list(DATASETS.keys()))))
    parser.add_argument(
        '--infer_root', type=str, default=None,
        help="dataset root directory")
    parser.add_argument(
        '--num_workers', type=int, default=0,
        help="number works of data loader")

    # prediction settings
    parser.add_argument(
        '--batch_size', type=int, default=32,
        help='Mini batch size')
    parser.add_argument(
        '--model_file', type=str, required=True,
        help='The path of model for evaluation')
    parser.add_argument(
        '--save_dir', type=str, default='./outputs/result',
        help='The directory for saving the inference results')
    parser.add_argument(
        '--device', type=str, default="cuda",
        help='device for training')

    return parser.parse_args()
# Example 3
def parse_args():
    """Parse the command-line arguments for model evaluation.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Model evaluation')

    # model selection
    parser.add_argument(
        '--model_name', type=str, default='FarNet',
        help='Model type for evaluation, which is one of {}'.format(
            str(list(MODELS.keys()))))

    # dataset selection
    parser.add_argument(
        '--dataset', type=str, default='Map',
        help="The dataset you want to evaluation, which is one of {}".format(
            str(list(DATASETS.keys()))))
    parser.add_argument(
        '--dataset_root', type=str, default=None,
        help="dataset root directory")

    # evaluation settings
    parser.add_argument(
        '--input_size', dest="input_size", type=int, nargs=2,
        default=[256, 256],
        help="The image size for net inputs.")
    parser.add_argument(
        '--model_dir', type=str, default=None,
        help='The path of model for evaluation')

    return parser.parse_args()
# Example 4
            lsh = LSHBuilder.build(len(data[0]), exp_file['dist_threshold'], k,
                                   L, exp_file['lsh'], validate)
            res_fn = get_result_fn(exp_file['dataset'],
                                   exp_file['lsh']['type'], method, repr(lsh))
            if os.path.exists(res_fn) and not args.force:
                print(f"{res_fn} exists, skipping.")
            else:
                params.setdefault((k, L), []).append(method)
    return params


if __name__ == "__main__":
    # CLI for the LSH experiment driver: pick a dataset, seed, and
    # experiment description file; --force re-runs even if results exist.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        choices=DATASETS.keys(),
                        default="mnist-784-euclidean")
    parser.add_argument('--seed', type=int, default=3)
    parser.add_argument('--exp-file', required=True)
    parser.add_argument('--force', action='store_true')
# Example 5
    elif cfg['dataset']['type'] == 'S3DIS':
        if cfg['dataset']['test_area'] == 'All':
            run_args = [1, 2, 3, 4, 5, 6]
        else:
            run_args = [cfg['dataset']['test_area']]
    else:
        run_args = [None for i in range(args.runs)]

    for run_idx, run_arg in enumerate(run_args):
        if cfg['dataset']['type'] == 'ShapeNet':
            cfg['dataset']['categories'] = run_arg
            cfg['model']['categories'] = run_arg
        if cfg['dataset']['type'] == 'S3DIS':
            cfg['dataset']['test_area'] = run_arg

        train_dataset = DATASETS(cfg['dataset'], train=True)
        test_dataset = DATASETS(cfg['dataset'], train=False)
        train_loader = DATALOADERS(cfg.get('dataloader', None), train_dataset, shuffle=True, worker_init_fn=_init_fn)
        test_loader = DATALOADERS(cfg.get('dataloader', None), test_dataset, shuffle=False, worker_init_fn=_init_fn)
        model = MODELS(cfg['model'])
        if args.resume_last:
            checkpoint, last_epoch, best_epoch, best_test_acc = resume_last(model, work_dir, cfg['model'])
            logger.info('Resumed from ' + checkpoint)
        elif args.resume_best:
            checkpoint, last_epoch, best_epoch, best_test_acc = resume_best(model, work_dir, cfg['model'])
        elif args.resume_from:
            last_epoch, best_epoch, best_test_acc = resume_from(model, args.resume_from, cfg['model'])
            logger.info('Resumed from ' + args.resume_from)
        else:
            last_epoch = best_test_acc = best_epoch = 0
# Example 6
def parse_args():
    """Parse the command-line arguments for model training.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    parser = argparse.ArgumentParser(description='Model training')

    # model selection
    parser.add_argument(
        '--model_name', type=str, default='FarNet',
        help='Model type for training, which is one of {}'.format(
            str(list(MODELS.keys()))))

    # dataset / loading
    parser.add_argument(
        '--dataset', type=str, default='ImagePairs',
        help="The dataset you want to train, which is one of {}".format(
            str(list(DATASETS.keys()))))
    parser.add_argument(
        '--train_root', type=str, required=True,
        help="train dataset root directory")
    parser.add_argument(
        '--val_root', type=str, default=None,
        help="val dataset root directory")
    parser.add_argument(
        '--num_workers', type=int, default=0,
        help="number works of data loader")
    parser.add_argument(
        '--device', type=str, default="cuda",
        help='device for training')

    # optimization / checkpointing
    parser.add_argument(
        '--epochs', type=int, default=20,
        help='epochs for training')
    parser.add_argument(
        '--batch_size', type=int, default=32,
        help='Mini batch size of one gpu or cpu')
    parser.add_argument(
        '--lr', type=float, default=0.0005,
        help='Learning rate')
    parser.add_argument(
        '--resume', type=str, default=None,
        help='The path of resume model')
    parser.add_argument(
        '--save_dir', type=str, default='./outputs',
        help='The directory for saving the model snapshot')
    parser.add_argument(
        '--logs_dir', type=str, default='./logs',
        help='The directory for saving the log message')

    return parser.parse_args()
# Example 7
################
# Top Level
################
parser.add_argument('--mode', type=str, default='train', choices=['train'])
parser.add_argument('--template', type=str, default=None)

################
# Test
################
parser.add_argument('--test_model_path', type=str, default=None)

################
# Dataset
################
parser.add_argument('--dataset_code', type=str, default='ml-20m', choices=DATASETS.keys())
parser.add_argument('--min_rating', type=int, default=4, help='Only keep ratings greater than equal to this value')
parser.add_argument('--min_uc', type=int, default=5, help='Only keep users with more than min_uc ratings')
parser.add_argument('--min_sc', type=int, default=0, help='Only keep items with more than min_sc ratings')
parser.add_argument('--split', type=str, default='leave_one_out', help='How to split the datasets')
parser.add_argument('--dataset_split_seed', type=int, default=98765)
parser.add_argument('--eval_set_size', type=int, default=500,
                    help='Size of val and test set. 500 for ML-1m and 10000 for ML-20m recommended')

################
# Dataloader
################
parser.add_argument('--dataloader_code', type=str, default='bert', choices=DATALOADERS.keys())
# Fix: argparse applies `type` only to strings coming from the command line;
# the default is used verbatim. The previous default of 0.0 silently produced
# a float seed whenever the flag was omitted — keep it an int.
parser.add_argument('--dataloader_random_seed', type=int, default=0)
parser.add_argument('--train_batch_size', type=int, default=64)
parser.add_argument('--val_batch_size', type=int, default=64)
        f.write(report)

    acc = np.sum(_confusion_matrix.diagonal()) / np.sum(_confusion_matrix)
    print(f"Overall {data_set} accuracy: {acc*100}%")

    # top 5 accuracy
    if data_set != "PDX":
        real_int = [from_label_to_int[k] for k in y]
        pred_probs = keras_model.predict(x)
        top5_acc = top_n_accuracy(real_int, pred_probs, n=5)
        print(f"Overall top 5 accuracy {data_set}': {top5_acc*100}%")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("datasets", nargs="+", choices=DATASETS.keys())
    parser.add_argument("--data-dir", default="data")
    parser.add_argument(
        "--model", required=True, default="inception",
        choices=["inception", "cnn", "resnet"]
    )
    parser.add_argument("--models-dir", default="models")
    parser.add_argument("--output-dir", default="output")
    args = parser.parse_args()

    start_time = tt.time()

    model_files = {
        "inception": f"{args.models_dir}/inception_net_1d.h5",
        "cnn": f"{args.models_dir}/cnn.h5",
        "resnet": f"{args.models_dir}/resnet.h5"