Code Example #1
File: eval_tf2.py  Project: duynn2019/imgclsmob
def main():
    """
    Main body of script.
    """
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)

    use_cuda = (args.num_gpus > 0)

    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        batch_size=batch_size,
        use_cuda=use_cuda)
    assert (hasattr(net, "in_size"))
    # input_image_size = net.in_size

    test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name="test_accuracy")

    @tf.function
    def test_step(images, labels):
        predictions = net(images, training=False)  # run in inference mode
        test_accuracy(labels, predictions)

    data_dir = args.data_dir
    val_dir = os.path.join(data_dir, "val")

    val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=(1.0 / 255))
    val_generator = val_datagen.flow_from_directory(
        val_dir,
        target_size=(224, 224),
        class_mode="binary",
        batch_size=batch_size,
        shuffle=False)

    if args.show_progress:
        from tqdm import tqdm
        val_generator = tqdm(val_generator)

    # total_img_count = val_generator.n
    total_img_count = 50000  # size of the ImageNet-1K validation set
    processed_img_count = 0
    # Keras directory iterators repeat forever, so stop after one full pass.
    for test_images, test_labels in val_generator:
        if processed_img_count >= total_img_count:
            break
        test_step(test_images, test_labels)
        processed_img_count += batch_size

    logging.info("Test Accuracy: {}".format(test_accuracy.result() * 100))
Code Example #2
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    batch_size = prepare_tf_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

    classes = 1000
    net, inputs_desc = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())

    val_dataflow = get_data(is_train=False,
                            batch_size=batch_size,
                            data_dir_path=args.data_dir)

    assert (args.use_pretrained or args.resume.strip())
    test(net=net,
         session_init=inputs_desc,
         val_dataflow=val_dataflow,
         do_calc_flops=args.calc_flops,
         extended_log=True)
Code Example #3
def main():
    args = parse_args()

    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    assert (args.batch_size == 1)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    ctx, batch_size = prepare_mx_context(num_gpus=args.num_gpus,
                                         batch_size=args.batch_size)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        dtype=args.dtype,
                        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
                        load_ignore_extra=False,
                        classes=args.num_classes,
                        in_channels=args.in_channels,
                        do_hybridize=False,
                        ctx=ctx)

    test_data = get_val_data_source(ds_metainfo=ds_metainfo,
                                    batch_size=args.batch_size,
                                    num_workers=args.num_workers)

    calc_detector_repeatability(test_data=test_data, net=net, ctx=ctx)
Code Example #4
def main():
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    logger.set_logger_dir(args.save_dir)

    batch_size = prepare_tf_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

    classes = 1000
    net, inputs_desc = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())

    train_dataflow = get_data(is_train=True,
                              batch_size=batch_size,
                              data_dir_path=args.data_dir)
    val_dataflow = get_data(is_train=False,
                            batch_size=batch_size,
                            data_dir_path=args.data_dir)

    train_net(net=net,
              session_init=inputs_desc,
              batch_size=batch_size,
              num_epochs=args.num_epochs,
              train_dataflow=train_dataflow,
              val_dataflow=val_dataflow)
Code Example #5
File: eval_gl.py  Project: siddie/imgclsmob
def main():
    """
    Main body of script.
    """
    args = parse_args()

    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    if args.all:
        args.use_pretrained = True
        for model_name, model_metainfo in (_model_sha1.items()
                                           if version_info[0] >= 3 else
                                           _model_sha1.iteritems()):
            error, checksum, repo_release_tag = model_metainfo
            args.model = model_name
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!"
                                 .format(exp_value))
    else:
        test_model(args=args)
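
For reference, the expected-value check above works because _model_sha1 stores each model's reference metric as an integer in units of 1e-4, so int(error) * 1e-4 recovers the fractional value and a deviation beyond 2e-4 is flagged. A small sketch of that tolerance test (the function name and sample values are hypothetical):

def value_mismatch(acc_value, error, tol=2e-4):
    # "error" is an int in units of 1e-4, as stored in _model_sha1.
    exp_value = int(error) * 1e-4
    return abs(acc_value - exp_value) > tol

print(value_mismatch(0.0735, 734))  # False: within tolerance
print(value_mismatch(0.0739, 734))  # True: would be reported as a wrong value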
Code Example #6
File: prep_oi4.py  Project: yyfyan/imgclsmob
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.src_data_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    src_dir_path = args.src_data_dir
    if not os.path.exists(src_dir_path):
        logging.error('Source directory does not exist.')
        return
    dst_dir_path = args.dst_data_dir
    if not os.path.exists(dst_dir_path):
        os.makedirs(dst_dir_path)

    create_train_data_subset(
        src_dir_path=src_dir_path,
        dst_dir_path=dst_dir_path,
        src_data_subset_name="train",
        dst_data_subset_name="train",
        annotations_sha1="4203637e3fb28f3c57c7d4e0c53121cd5e9e098e",
        urls_sha1="2f64a7d611426cbc4ac3ffa029908d1871c9317d",
        num_classes=1000,
        max_image_count=5000)
Code Example #7
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)

    val_iterator, val_dataset_len = get_val_data_iterator(
        dataset_name=args.dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        calc_weight_count=True,
        extended_log=True)
Code Example #8
File: eval_tf2.py  Project: zhangkehua/imgclsmob
def main():
    """
    Main body of script.
    """
    args = parse_args()

    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    data_format = "channels_last"
    tf.keras.backend.set_image_data_format(data_format)

    use_cuda = (args.num_gpus > 0)

    if args.all:
        args.use_pretrained = True
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
        }
        for model_name, model_metainfo in (_model_sha1.items()
                                           if version_info[0] >= 3 else
                                           _model_sha1.iteritems()):
            error, checksum, repo_release_tag, ds, scale = model_metainfo
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.resize_inv_factor = scale
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(
                args=args,
                use_cuda=use_cuda,
                data_format=data_format)
            if acc_value is not None:
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!".format(exp_value))
            tf.keras.backend.clear_session()
    else:
        test_model(
            args=args,
            use_cuda=use_cuda,
            data_format=data_format)
Code Example #9
def main():
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()
    batch_size = args.batch_size

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        num_gpus=num_gpus)

    train_iter, val_iter = get_data_iterators(batch_size=batch_size,
                                              num_workers=args.num_workers)

    trainer = prepare_trainer(net=net,
                              optimizer_name=args.optimizer_name,
                              lr=args.lr,
                              momentum=args.momentum,
                              num_epochs=args.num_epochs,
                              train_iter=train_iter,
                              val_iter=val_iter,
                              logging_dir_path=args.save_dir,
                              num_gpus=num_gpus)

    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None

    trainer.run()
Code Example #10
def main():
    """
    Main body of script.
    """
    args = parse_args()

    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    if args.all:
        args.use_pretrained = True
        dataset_name_map = {
            "in1k": "ImageNet1K",
            "cub": "CUB200_2011",
            "cf10": "CIFAR10",
            "cf100": "CIFAR100",
            "svhn": "SVHN",
            "voc": "VOC",
            "ade20k": "ADE20K",
            "cs": "Cityscapes",
            "cocoseg": "CocoSeg",
            "cocohpe": "CocoHpe",
            "hp": "HPatches",
        }
        for model_name, model_metainfo in (_model_sha1.items()
                                           if version_info[0] >= 3 else
                                           _model_sha1.iteritems()):
            error, checksum, repo_release_tag, caption, paper, ds, img_size, scale, batch, rem = model_metainfo
            if (ds != "in1k") or (img_size == 0) or ((len(rem) > 0) and
                                                     (rem[-1] == "*")):
                continue
            args.dataset = dataset_name_map[ds]
            args.model = model_name
            args.input_size = img_size
            args.resize_inv_factor = scale
            args.batch_size = batch
            logging.info("==============")
            logging.info("Checking model: {}".format(model_name))
            acc_value = test_model(args=args)
            if acc_value is not None:
                exp_value = int(error) * 1e-4
                if abs(acc_value - exp_value) > 2e-4:
                    logging.info("----> Wrong value detected (expected value: {})!"
                                 .format(exp_value))
    else:
        test_model(args=args)
Code Example #11
def main():
    """
    Main body of script.
    """
    args = parse_args()

    if args.disable_cudnn_autotune:
        os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    assert (ds_metainfo.ml_type != "imgseg") or (args.batch_size == 1)
    assert (ds_metainfo.ml_type != "imgseg") or args.disable_cudnn_autotune

    global_config.train = False
    use_gpus = prepare_ch_context(args.num_gpus)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        use_gpus=use_gpus,
                        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
                        num_classes=args.num_classes,
                        in_channels=args.in_channels)
    assert (hasattr(net, "classes"))
    assert (hasattr(net, "in_size"))

    if args.data_subset == "val":
        get_test_data_source_class = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        get_test_data_source_class = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    test_data = get_test_data_source_class(ds_metainfo=ds_metainfo,
                                           batch_size=args.batch_size,
                                           num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(net=net,
         test_data=test_data,
         metric=test_metric,
         calc_weight_count=True,
         extended_log=True)
Code Example #12
def main():
    """
    Main body of script.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    batch_size = prepare_ke_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip())
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(
        net, "in_size") else (args.input_size, args.input_size)

    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    train_gen = get_data_generator(data_iterator=train_data,
                                   num_classes=num_classes)
    val_gen = get_data_generator(data_iterator=val_data,
                                 num_classes=num_classes)

    net = prepare_trainer(net=net,
                          optimizer_name=args.optimizer_name,
                          momentum=args.momentum,
                          lr=args.lr,
                          num_gpus=args.num_gpus,
                          state_file_path=args.resume_state)

    train_net(net=net,
              train_gen=train_gen,
              val_gen=val_gen,
              train_num_examples=1281167,
              val_num_examples=50048,
              num_epochs=args.num_epochs,
              checkpoint_filepath=os.path.join(
                  args.save_dir, "imagenet_{}.h5".format(args.model)),
              start_epoch1=args.start_epoch)
Code Example #13
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ctx, batch_size = prepare_mx_context(num_gpus=args.num_gpus,
                                         batch_size=args.batch_size)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        dtype=args.dtype,
                        tune_layers="",
                        ctx=ctx)
    input_image_size = net.in_size if hasattr(
        net, 'in_size') else (args.input_size, args.input_size)

    if args.use_rec:
        train_data, val_data, batch_fn = get_data_rec(
            rec_train=args.rec_train,
            rec_train_idx=args.rec_train_idx,
            rec_val=args.rec_val,
            rec_val_idx=args.rec_val_idx,
            batch_size=batch_size,
            num_workers=args.num_workers,
            input_image_size=input_image_size,
            resize_inv_factor=args.resize_inv_factor)
    else:
        train_data, val_data, batch_fn = get_data_loader(
            data_dir=args.data_dir,
            batch_size=batch_size,
            num_workers=args.num_workers,
            input_image_size=input_image_size,
            resize_inv_factor=args.resize_inv_factor)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        use_rec=args.use_rec,
        dtype=args.dtype,
        ctx=ctx,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        extended_log=True)
Code Example #14
File: eval_gl.py  Project: GuideWsp/imgclsmob
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ctx, batch_size = prepare_mx_context(num_gpus=args.num_gpus,
                                         batch_size=args.batch_size)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        dtype=args.dtype,
                        classes=args.num_classes,
                        in_channels=args.in_channels,
                        do_hybridize=(not args.calc_flops),
                        ctx=ctx)

    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    val_data = get_val_data_source(dataset_metainfo=ds_metainfo,
                                   dataset_dir=args.data_dir,
                                   batch_size=batch_size,
                                   num_workers=args.num_workers,
                                   input_image_size=input_image_size,
                                   resize_inv_factor=args.resize_inv_factor)
    batch_fn = get_batch_fn(use_imgrec=ds_metainfo.use_imgrec)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        val_metric=get_composite_metric(ds_metainfo.val_metric_names),
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
Code Example #15
def main():
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=1)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs={"aux": False, "fixed_size": False},
        load_ignore_extra=True,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=False,
        ctx=ctx)
    input_image_size = net.in_size if hasattr(net, 'in_size') else (480, 480)

    test_data = get_test_data_source(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        data_source_needs_reset=False,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        classes=args.num_classes,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
Code Example #16
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(num_gpus=args.num_gpus,
                                              batch_size=1)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        use_cuda=use_cuda,
                        net_extra_kwargs={
                            "aux": False,
                            "fixed_size": False
                        },
                        load_ignore_extra=True,
                        remove_module=args.remove_module)
    if hasattr(net, 'module'):
        input_image_size = net.module.in_size[0] if hasattr(
            net.module, 'in_size') else args.input_size
    else:
        input_image_size = net.in_size[0] if hasattr(
            net, 'in_size') else args.input_size

    test_data = get_test_data_loader(dataset_name=args.dataset,
                                     dataset_dir=args.data_dir,
                                     batch_size=batch_size,
                                     num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        num_classes=args.num_classes,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
Code Example #17
def main():
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    use_gpus = prepare_ch_context(args.num_gpus)
    # batch_size = args.batch_size

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_gpus=use_gpus,
        num_classes=args.num_classes,
        in_channels=args.in_channels)
    assert (hasattr(net, "classes"))
    assert (hasattr(net, "in_size"))

    train_data = get_train_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    val_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_data=train_data,
        val_data=val_data,
        logging_dir_path=args.save_dir,
        use_gpus=use_gpus)

    trainer.run()
Code Example #18
def main():
    """
    Main body of script.
    """
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    batch_size = prepare_ke_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip())
    num_classes = net.classes if hasattr(net, "classes") else 1000
    input_image_size = net.in_size if hasattr(net, "in_size") else (args.input_size, args.input_size)

    train_data, val_data = get_data_rec(
        rec_train=args.rec_train,
        rec_train_idx=args.rec_train_idx,
        rec_val=args.rec_val,
        rec_val_idx=args.rec_val_idx,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        only_val=True)
    val_gen = get_data_generator(
        data_iterator=val_data,
        num_classes=num_classes)

    val_size = 50000
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_gen=val_gen,
        val_size=val_size,
        batch_size=batch_size,
        num_gpus=args.num_gpus,
        calc_weight_count=True,
        extended_log=True)
Code Example #19
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.calc_flops),
        ctx=ctx)
    input_image_size = net.in_size if hasattr(net, 'in_size') else (32, 32)

    val_data = get_val_data_source(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        val_data=val_data,
        data_source_needs_reset=False,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
Code Example #20
File: eval_gl_mch.py  Project: cavalleria/imgclsmob
def main():
    args = parse_args()

    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    assert (args.batch_size == 1)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=None,
        load_ignore_extra=False,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=False,
        ctx=ctx)

    test_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)  # assumption: obtained as in Code Example #27
    tic = time.time()
    # Timing-only pass: run inference on every batch and log the total wall time.
    for batch in test_data:
        data_src_list, data_dst_list, labels_list = batch_fn(batch, ctx)
        outputs_src_list = [net(X) for X in data_src_list]
        assert (outputs_src_list is not None)
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
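
The loop above is a pure timing pass. For a standalone micro-benchmark, time.perf_counter() is a more precise timer than time.time(); a minimal sketch of the same pattern, with a placeholder workload instead of net(X):

import logging
import time

logging.basicConfig(level=logging.INFO)

tic = time.perf_counter()  # monotonic, high-resolution clock
for _ in range(1000):
    sum(range(1000))  # placeholder for one inference step
logging.info("Time cost: {:.4f} sec".format(time.perf_counter() - tic))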
Code Example #21
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(num_gpus=args.num_gpus,
                                              batch_size=args.batch_size)

    net = prepare_model(model_name=args.model,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip(),
                        use_cuda=use_cuda,
                        remove_module=args.remove_module)
    if hasattr(net, 'module'):
        input_image_size = net.module.in_size[0] if hasattr(
            net.module, 'in_size') else args.input_size
    else:
        input_image_size = net.in_size[0] if hasattr(
            net, 'in_size') else args.input_size

    val_data = get_val_data_loader(data_dir=args.data_dir,
                                   batch_size=batch_size,
                                   num_workers=args.num_workers,
                                   input_image_size=input_image_size,
                                   resize_inv_factor=args.resize_inv_factor)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True)
Code Example #22
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)
    num_classes = net.classes if hasattr(net, 'classes') else 1000
    input_image_size = net.in_size[0] if hasattr(net, 'in_size') else args.input_size

    val_iterator, val_dataset_len = get_val_data_iterator(
        data_dir=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        num_classes=num_classes)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_iterator=val_iterator,
        val_dataset_len=val_dataset_len,
        num_gpus=num_gpus,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor,
        calc_weight_count=True,
        extended_log=True)
Code Example #23
File: prep_oi4bb.py  Project: yyfyan/imgclsmob
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    src_dir_path = args.data_dir
    if not os.path.exists(src_dir_path):
        logging.error('Source directory does not exist.')
        return
    dst_dir_path = args.save_dir
    if not os.path.exists(dst_dir_path):
        os.makedirs(dst_dir_path)
    remove_src = args.remove_archives
    rewrite = args.rewrite

    unique_label_names = get_label_list(src_dir_path=src_dir_path)

    data_name_list = ["validation", "test", "train"]
    archive_file_stem_lists = [
        ["validation"],
        ["test"],
        ["train_00", "train_01", "train_02",
         "train_03", "train_04", "train_05",
         "train_06", "train_07", "train_08"]]

    for data_name, archive_file_stem_list in zip(data_name_list, archive_file_stem_lists):
        process_data(src_dir_path=src_dir_path,
                     dst_dir_path=dst_dir_path,
                     rewrite=rewrite,
                     remove_src=remove_src,
                     data_name=data_name,
                     archive_file_stem_list=archive_file_stem_list,
                     unique_label_names=unique_label_names)
Code Example #24
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    batch_size = prepare_ke_context(num_gpus=args.num_gpus,
                                    batch_size=args.batch_size)

    num_classes = 1000
    net = prepare_model(model_name=args.model,
                        classes=num_classes,
                        use_pretrained=args.use_pretrained,
                        pretrained_model_file_path=args.resume.strip())

    train_data, val_data = get_data_rec(rec_train=args.rec_train,
                                        rec_train_idx=args.rec_train_idx,
                                        rec_val=args.rec_val,
                                        rec_val_idx=args.rec_val_idx,
                                        batch_size=batch_size,
                                        num_workers=args.num_workers)
    val_gen = get_data_generator(data_iterator=val_data,
                                 num_classes=num_classes)

    val_size = 50000
    assert (args.use_pretrained or args.resume.strip())
    test(net=net,
         val_gen=val_gen,
         val_size=val_size,
         batch_size=batch_size,
         num_gpus=args.num_gpus,
         calc_weight_count=True,
         extended_log=True)
Code Example #25
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    global_config.train = False

    num_gpus = args.num_gpus
    if num_gpus > 0:
        cuda.get_device(0).use()

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        net_extra_kwargs={"aux": False, "fixed_size": False},
        use_gpus=(num_gpus > 0))

    test_dataset = get_test_dataset(
        dataset_name=args.dataset,
        dataset_dir=args.data_dir)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        test_dataset=test_dataset,
        num_gpus=num_gpus,
        num_classes=args.num_classes,
        calc_weight_count=True,
        extended_log=True,
        dataset_metainfo=get_metainfo(args.dataset))
Code Example #26
File: eval_pt.py  Project: jdc08161063/imgclsmob
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    classes = 1000
    net = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        extended_log=True)
Code Example #27
def main():
    """
    Main body of script.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ctx, batch_size = prepare_mx_context(num_gpus=args.num_gpus,
                                         batch_size=args.batch_size)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    use_teacher = (args.teacher_models is not None) and (args.teacher_models.strip() != "")

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(not args.not_hybridize),
        initializer=get_initializer(initializer_name=args.initializer),
        ctx=ctx)
    assert (hasattr(net, "classes"))
    num_classes = net.classes

    teacher_net = None
    discrim_net = None
    discrim_loss_func = None
    if use_teacher:
        teacher_nets = []
        for teacher_model in args.teacher_models.split(","):
            teacher_net = prepare_model(
                model_name=teacher_model.strip(),
                use_pretrained=True,
                pretrained_model_file_path="",
                dtype=args.dtype,
                net_extra_kwargs=ds_metainfo.train_net_extra_kwargs,
                do_hybridize=(not args.not_hybridize),
                ctx=ctx)
            assert (teacher_net.classes == net.classes)
            assert (teacher_net.in_size == net.in_size)
            teacher_nets.append(teacher_net)
        if len(teacher_nets) > 0:
            teacher_net = Concurrent(stack=True,
                                     prefix="",
                                     branches=teacher_nets)
            for k, v in teacher_net.collect_params().items():
                v.grad_req = "null"
            if not args.not_discriminator:
                discrim_net = MealDiscriminator()
                discrim_net.cast(args.dtype)
                if not args.not_hybridize:
                    discrim_net.hybridize(static_alloc=True, static_shape=True)
                discrim_net.initialize(mx.init.MSRAPrelu(), ctx=ctx)
                for k, v in discrim_net.collect_params().items():
                    v.lr_mult = args.dlr_factor
                discrim_loss_func = MealAdvLoss()

    train_data = get_train_data_source(ds_metainfo=ds_metainfo,
                                       batch_size=batch_size,
                                       num_workers=args.num_workers)
    val_data = get_val_data_source(ds_metainfo=ds_metainfo,
                                   batch_size=batch_size,
                                   num_workers=args.num_workers)
    batch_fn = get_batch_fn(ds_metainfo=ds_metainfo)

    num_training_samples = (ds_metainfo.num_training_samples if ds_metainfo.use_imgrec
                            else len(train_data._dataset))
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)

    if args.save_dir and args.save_interval:
        param_names = ds_metainfo.val_metric_capts + ds_metainfo.train_metric_capts + [
            "Train.Loss", "LR"
        ]
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix="{}_{}".format(ds_metainfo.short_label,
                                                       args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=(".params", ".states"),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=param_names,
            acc_ind=ds_metainfo.saver_acc_ind,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, "score.log"),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, "best_map.log"))
    else:
        lp_saver = None

    val_metric = get_composite_metric(ds_metainfo.val_metric_names,
                                      ds_metainfo.val_metric_extra_kwargs)
    train_metric = get_composite_metric(ds_metainfo.train_metric_names,
                                        ds_metainfo.train_metric_extra_kwargs)
    loss_metrics = [LossValue(name="loss"), LossValue(name="dloss")]

    loss_kwargs = {
        "sparse_label": (not (args.mixup or args.label_smoothing)
                         and not (use_teacher and (teacher_net is not None)))
    }
    if ds_metainfo.loss_extra_kwargs is not None:
        loss_kwargs.update(ds_metainfo.loss_extra_kwargs)
    loss_func = get_loss(ds_metainfo.loss_name, loss_kwargs)

    train_net(batch_size=batch_size,
              num_epochs=args.num_epochs,
              start_epoch1=args.start_epoch,
              train_data=train_data,
              val_data=val_data,
              batch_fn=batch_fn,
              data_source_needs_reset=ds_metainfo.use_imgrec,
              dtype=args.dtype,
              net=net,
              teacher_net=teacher_net,
              discrim_net=discrim_net,
              trainer=trainer,
              lr_scheduler=lr_scheduler,
              lp_saver=lp_saver,
              log_interval=args.log_interval,
              mixup=args.mixup,
              mixup_epoch_tail=args.mixup_epoch_tail,
              label_smoothing=args.label_smoothing,
              num_classes=num_classes,
              grad_clip_value=args.grad_clip,
              batch_size_scale=args.batch_size_scale,
              val_metric=val_metric,
              train_metric=train_metric,
              loss_metrics=loss_metrics,
              loss_func=loss_func,
              discrim_loss_func=discrim_loss_func,
              ctx=ctx)
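
Two details of the teacher setup above are worth isolating: the teacher parameters are frozen by setting grad_req to "null" (no gradients are computed or applied for them), and the discriminator's learning rate is scaled through lr_mult. A minimal Gluon sketch of the freezing idiom, assuming MXNet is installed and using a throwaway layer as the teacher:

import mxnet as mx
from mxnet.gluon import nn

teacher = nn.Dense(10)                # stand-in for a pretrained teacher network
teacher.initialize(mx.init.Xavier())

# Freeze the teacher: its parameters take part in the forward pass only.
for _, param in teacher.collect_params().items():
    param.grad_req = "null"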
Code Example #28
File: convert_models.py  Project: jropen/imgclsmob
def main():
    args = parse_args()

    packages = []
    pip_packages = []
    if (args.src_fwk == "gluon") or (args.dst_fwk == "gluon"):
        packages += ['mxnet']
        pip_packages += ['mxnet-cu92']
    if (args.src_fwk == "pytorch") or (args.dst_fwk == "pytorch"):
        packages += ['torch', 'torchvision']
    if (args.src_fwk == "chainer") or (args.dst_fwk == "chainer"):
        packages += ['chainer', 'chainercv']
        pip_packages += ['cupy-cuda92', 'chainer', 'chainercv']
    if (args.src_fwk == "keras") or (args.dst_fwk == "keras"):
        packages += ['keras']
        pip_packages += [
            'keras', 'keras-mxnet', 'keras-applications', 'keras-preprocessing'
        ]
    if (args.src_fwk == "tensorflow") or (args.dst_fwk == "tensorflow"):
        packages += ['tensorflow-gpu']
        pip_packages += ['tensorflow-gpu', 'tensorpack', 'mxnet-cu90']

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=packages,
        log_pip_packages=pip_packages)

    ctx = mx.cpu()
    use_cuda = False

    src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2 = prepare_src_model(
        src_fwk=args.src_fwk,
        src_model=args.src_model,
        src_params_file_path=args.src_params,
        dst_fwk=args.dst_fwk,
        ctx=ctx,
        use_cuda=use_cuda)

    dst_params, dst_param_keys, dst_net = prepare_dst_model(
        dst_fwk=args.dst_fwk,
        dst_model=args.dst_model,
        src_fwk=args.src_fwk,
        ctx=ctx,
        use_cuda=use_cuda)

    if (args.dst_fwk in ["keras", "tensorflow"]) and any(
        [s.find("convgroup") >= 0 for s in dst_param_keys]):
        assert (len(src_param_keys) <= len(dst_param_keys))
    else:
        assert (len(src_param_keys) == len(dst_param_keys))

    if args.src_fwk == "gluon" and args.dst_fwk == "gluon":
        convert_gl2gl(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      ctx=ctx)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "pytorch":
        convert_pt2pt(dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      src_model=args.src_model,
                      dst_model=args.dst_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "pytorch":
        convert_gl2pt(dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "chainer":
        convert_gl2ch(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      ext_src_param_keys=ext_src_param_keys,
                      ext_src_param_keys2=ext_src_param_keys2,
                      src_model=args.src_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "keras":
        convert_gl2ke(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tensorflow":
        convert_gl2tf(dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "gluon":
        convert_pt2gl(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      ctx=ctx)
    elif args.src_fwk == "mxnet" and args.dst_fwk == "gluon":
        convert_mx2gl(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      src_model=args.src_model,
                      ctx=ctx)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "tensorflow":
        convert_tf2tf(dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "gluon":
        convert_tf2gl(dst_net=dst_net,
                      dst_params_file_path=args.dst_params,
                      dst_params=dst_params,
                      dst_param_keys=dst_param_keys,
                      src_params=src_params,
                      src_param_keys=src_param_keys,
                      ctx=ctx)
    else:
        raise NotImplementedError

    logging.info('Converted {}-model {} into {}-model {}'.format(
        args.src_fwk, args.src_model, args.dst_fwk, args.dst_model))
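
The framework-pair ladder above grows with every supported combination. A common alternative design is a dispatch table keyed on (src_fwk, dst_fwk); a self-contained sketch with stub converters standing in for the real convert_* functions:

from typing import Callable, Dict, Tuple

def convert_gl2pt(**kwargs):
    print("gluon -> pytorch:", sorted(kwargs))  # stub for the real converter

def convert_pt2gl(**kwargs):
    print("pytorch -> gluon:", sorted(kwargs))  # stub for the real converter

CONVERTERS: Dict[Tuple[str, str], Callable] = {
    ("gluon", "pytorch"): convert_gl2pt,
    ("pytorch", "gluon"): convert_pt2gl,
}

def dispatch(src_fwk, dst_fwk, **kwargs):
    try:
        converter = CONVERTERS[(src_fwk, dst_fwk)]
    except KeyError:
        raise NotImplementedError((src_fwk, dst_fwk))
    return converter(**kwargs)

dispatch("gluon", "pytorch", dst_params_file_path="model.pth")

Unhandled pairs still raise NotImplementedError, matching the final else branch above.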
Code Example #29
def main():
    """
    Main body of script.
    """
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        classes=args.num_classes,
        in_channels=args.in_channels,
        do_hybridize=(ds_metainfo.allow_hybridize and (not args.calc_flops)),
        ctx=ctx)
    assert (hasattr(net, "in_size"))
    input_image_size = net.in_size

    if args.data_subset == "val":
        get_test_data_source_class = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        get_test_data_source_class = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    batch_fn = get_batch_fn(use_imgrec=ds_metainfo.use_imgrec)

    if not args.not_show_progress:
        test_data = tqdm(test_data)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        batch_fn=batch_fn,
        data_source_needs_reset=ds_metainfo.use_imgrec,
        metric=test_metric,
        dtype=args.dtype,
        ctx=ctx,
        input_image_size=input_image_size,
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        show_bad_samples=args.show_bad_samples)
Code Example #30
def main():
    """
    Main body of script.
    """
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
        load_ignore_extra=ds_metainfo.load_ignore_extra,
        num_classes=args.num_classes,
        in_channels=args.in_channels,
        remove_module=args.remove_module)
    real_net = net.module if hasattr(net, "module") else net
    input_image_size = real_net.in_size[0] if hasattr(real_net, "in_size") else args.input_size

    if args.data_subset == "val":
        get_test_data_source_class = get_val_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.val_metric_names,
            metric_extra_kwargs=ds_metainfo.val_metric_extra_kwargs)
    else:
        get_test_data_source_class = get_test_data_source
        test_metric = get_composite_metric(
            metric_names=ds_metainfo.test_metric_names,
            metric_extra_kwargs=ds_metainfo.test_metric_extra_kwargs)
    test_data = get_test_data_source_class(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

    if not args.not_show_progress:
        test_data = tqdm(test_data)

    assert (args.use_pretrained or args.resume.strip() or args.calc_flops_only)
    test(
        net=net,
        test_data=test_data,
        metric=test_metric,
        use_cuda=use_cuda,
        input_image_size=(input_image_size, input_image_size),
        in_channels=args.in_channels,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        calc_flops_only=args.calc_flops_only,
        extended_log=True,
        show_bad_samples=args.show_bad_samples)