Example #1
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        remove_module=args.remove_module)
    # The network may be wrapped in nn.DataParallel, in which case its own
    # attributes live on net.module. Fall back to the command-line input size
    # when the model does not declare in_size.
    real_net = net.module if hasattr(net, 'module') else net
    if hasattr(real_net, 'in_size'):
        input_image_size = real_net.in_size[0]
    else:
        input_image_size = args.input_size

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)

    # Evaluation needs weights: either a pretrained model or a resumed checkpoint.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        input_image_size=input_image_size,
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        extended_log=True)
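
For reference, here is a minimal sketch of what an evaluation helper along the lines of test() typically does for an image classifier: run the network over the validation loader and accumulate top-1/top-5 accuracy. This is an illustrative implementation written for this page, not the repository's actual test() function; the net, val_data and use_cuda values follow the calls above, and the evaluate name is hypothetical.

import torch

def evaluate(net, val_data, use_cuda):
    # Disable dropout and freeze batch-norm statistics for evaluation.
    net.eval()
    top1_correct = 0
    top5_correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in val_data:
            if use_cuda:
                images = images.cuda()
                labels = labels.cuda()
            outputs = net(images)
            # Indices of the 5 highest-scoring classes per sample, shape (batch, 5).
            _, top5 = outputs.topk(5, dim=1)
            total += labels.size(0)
            top1_correct += (top5[:, 0] == labels).sum().item()
            top5_correct += (top5 == labels.unsqueeze(1)).any(dim=1).sum().item()
    return top1_correct / total, top5_correct / total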
Example #2
def main():
    args = parse_args()

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    # Number of target classes (1000 corresponds to ImageNet-1k).
    classes = 1000
    net = prepare_model(
        model_name=args.model,
        classes=classes,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers)

    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        use_cuda=use_cuda,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        calc_flops=args.calc_flops,
        extended_log=True)
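
The prepare_pt_context() call above turns the requested GPU count into a CUDA flag and an effective batch size. As a rough, hypothetical illustration of that kind of logic (an assumption about its behavior, not the repository's actual implementation):

def prepare_pt_context_sketch(num_gpus, batch_size):
    # Use CUDA whenever at least one GPU is requested.
    use_cuda = (num_gpus > 0)
    # Scale the per-device batch size by the number of GPUs so that each
    # device still sees roughly batch_size samples under DataParallel.
    batch_size *= max(1, num_gpus)
    return use_cuda, batch_size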
Example #3
def main():
    # parse_args, init_rand, os and the other helpers used below are imported
    # at module level in the original training script.
    args = parse_args()
    # Seed all random number generators; init_rand returns the seed actually used.
    args.seed = init_rand(seed=args.seed)

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)

    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)

    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda)
    # The network may be wrapped in nn.DataParallel, in which case its own
    # attributes live on net.module. Fall back to the command-line input size
    # when the model does not declare in_size.
    real_net = net.module if hasattr(net, 'module') else net
    if hasattr(real_net, 'in_size'):
        input_image_size = real_net.in_size[0]
    else:
        input_image_size = args.input_size

    train_data, val_data = get_data_loader(
        data_dir=args.data_dir,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)

    # num_training_samples = 1281167
    optimizer, lr_scheduler, start_epoch = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        # warmup_epochs=args.warmup_epochs,
        # batch_size=batch_size,
        num_epochs=args.num_epochs,
        # num_training_samples=num_training_samples,
        state_file_path=args.resume_state)
    # if start_epoch is not None:
    #     args.start_epoch = start_epoch

    if args.save_dir and args.save_interval:
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=('.pth', '.states'),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss'],
            acc_ind=2,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, 'score.log'),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    else:
        lp_saver = None

    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        net=net,
        optimizer=optimizer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        use_cuda=use_cuda)
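
The arguments passed to prepare_trainer() map naturally onto a standard PyTorch optimizer plus an epoch-based step-decay scheduler. Below is a minimal sketch under that assumption; it is hypothetical and limited to SGD, while the real helper also handles other optimizers and resuming from state_file_path, and lr_decay_epoch is assumed here to be a comma-separated string of epoch milestones.

import torch

def prepare_trainer_sketch(net, lr, momentum, wd, lr_decay_epoch, lr_decay):
    # Plain SGD with momentum and weight decay.
    optimizer = torch.optim.SGD(
        net.parameters(),
        lr=lr,
        momentum=momentum,
        weight_decay=wd)
    # Multiply the learning rate by lr_decay at each milestone epoch,
    # e.g. lr_decay_epoch="30,60,90" -> milestones [30, 60, 90].
    milestones = [int(e) for e in lr_decay_epoch.split(',')]
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer,
        milestones=milestones,
        gamma=lr_decay)
    start_epoch = 0
    return optimizer, lr_scheduler, start_epoch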