Example 1
def main():
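    """
    Main body of script: initialize logging, prepare the model, and evaluate it on the validation data iterator.
    """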
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)

    val_iterator, val_dataset_len = get_val_data_iterator(
        dataset_name=args.dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        calc_weight_count=True,
        extended_log=True)
Example 2
def main():
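    """
    Main body of script: initialize logging, prepare the model and data iterators, and run training.
    """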
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()
    batch_size = args.batch_size

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        num_gpus=num_gpus)

    train_iter, val_iter = get_data_iterators(batch_size=batch_size,
                                              num_workers=args.num_workers)

    trainer = prepare_trainer(net=net,
                              optimizer_name=args.optimizer_name,
                              lr=args.lr,
                              momentum=args.momentum,
                              num_epochs=args.num_epochs,
                              train_iter=train_iter,
                              val_iter=val_iter,
                              logging_dir_path=args.save_dir,
                              num_gpus=num_gpus)

    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None

    trainer.run()
Example 3
def test_model(args):
    """
    Main test routine.

    Parameters
    ----------
    args : argparse.Namespace
        Main script arguments.

    Returns
    -------
    float
        Main accuracy value.
    """
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune

    global_config.train = False
    use_gpus = prepare_ch_context(args.num_gpus)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=use_gpus,
        net_extra_kwargs=ds_metainfo.test_net_extra_kwargs,
        num_classes=(args.num_classes if ds_metainfo.ml_type != "hpe" else None),
        in_channels=args.in_channels)
    assert (hasattr(net, "classes") or (ds_metainfo.ml_type == "hpe"))
    assert (hasattr(net, "in_size"))

    get_test_data_source_class = get_val_data_source if args.data_subset == "val" else get_test_data_source
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    if args.data_subset == "val":
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)

    assert (args.use_pretrained or args.resume.strip())
    acc_values = calc_model_accuracy(
        net=net,
        test_data=test_data,
        metric=test_metric,
        calc_weight_count=True,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
    return acc_values[ds_metainfo.saver_acc_ind] if len(acc_values) > 0 else None
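For context, test_model returns a single accuracy value (or None). A minimal sketch of how it could be wired into a script entry point follows, assuming the same parse_args helper used in the other examples; the wrapper and the print call are illustrative, not part of the original script.

def main():
    # Hypothetical wrapper: parse command-line arguments, run the test routine,
    # and report the resulting accuracy value.
    args = parse_args()  # assumed helper, as in the surrounding examples
    acc_value = test_model(args)
    if acc_value is not None:
        print("Main accuracy value: {}".format(acc_value))


if __name__ == "__main__":
    main()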
Example 4
def main():
    """
    Main body of script.
    """
    args = parse_args()

    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune

    global_config.train = False
    use_gpus = prepare_ch_context(args.num_gpus)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        use_gpus=use_gpus,
                        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
                        num_classes=args.num_classes,
                        in_channels=args.in_channels)
    assert (hasattr(net, "classes"))
    assert (hasattr(net, "in_size"))

    if args.data_subset == "val":
        get_test_data_source_class = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        get_test_data_source_class = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    test_data = get_test_data_source_class(ds_metainfo=ds_metainfo,
                                           batch_size=args.batch_size,
                                           num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(net=net,
         test_data=test_data,
         metric=test_metric,
         calc_weight_count=True,
         extended_log=True)
Example 5
def main():
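    """
    Main body of script: initialize logging, prepare the model and data sources, and run training.
    """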
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    use_gpus = prepare_ch_context(args.num_gpus)
    # batch_size = args.batch_size

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=use_gpus,
        num_classes=args.num_classes,
        in_channels=args.in_channels)
    assert (hasattr(net, "classes"))
    assert (hasattr(net, "in_size"))

    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_data=train_data,
        val_data=val_data,
        logging_dir_path=args.save_dir,
        use_gpus=use_gpus)

    trainer.run()
Example 6
def main():
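    """
    Main body of script: initialize logging, prepare the model, and evaluate it on the validation data at the model input image size.
    """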
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size[0] if hasattr(net, "in_size") else args.input_size

    val_iterator, val_dataset_len = get_val_data_iterator(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        num_classes=num_classes)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        calc_weight_count=True,
        extended_log=True)
Example 7
def main():
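    """
    Main body of script: initialize logging, prepare the model, and evaluate it on the test dataset.
    """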
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        net_extra_kwargs={"aux": False, "fixed_size": False},
        use_gpus=(num_gpus > 0))

    test_dataset = get_test_dataset(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        test_dataset=test_dataset,
        num_gpus=num_gpus,
        num_classes=args.num_classes,
        calc_weight_count=True,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))