Code Example #1
File: test.py  Project: Res2Net/Res2Net-SOD100K
def main():
    global cfg
    model_lib = importlib.import_module("model." + cfg.MODEL.ARCH)
    predefine_file = cfg.TEST.MODEL_CONFIG
    model = model_lib.build_model(predefine=predefine_file)
    model.cuda()
    params, flops = simplesum(model, inputsize=(3, 224, 224), device=0)
    print('  + Number of params: %.4fM' % (params / 1e6))
    print('  + Number of FLOPs: %.4fG' % (flops / 1e9))
    this_checkpoint = cfg.TEST.CHECKPOINT
    if os.path.isfile(this_checkpoint):
        print("=> loading checkpoint '{}'".format(this_checkpoint))
        checkpoint = torch.load(this_checkpoint)
        loadepoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            this_checkpoint, checkpoint['epoch']))
        test(model, cfg.TEST.DATASETS, loadepoch)
        eval(cfg.TASK, loadepoch)
    else:
        print("=> no checkpoint found at '{}'".format(this_checkpoint))
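Note: simplesum is a project-local utility that is not shown in these listings. A minimal sketch of its parameter-counting half, assuming a plain torch.nn.Module (the FLOP estimate would need per-layer hooks and is omitted here; count_params is a hypothetical stand-in, not the project's function):

import torch.nn as nn

def count_params(model: nn.Module) -> int:
    # Total number of trainable parameters in the model.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

# Usage, mirroring the print format above:
# print('  + Number of params: %.4fM' % (count_params(model) / 1e6))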
Code Example #2
File: finetune.py  Project: Snow02203835/SOD100K
def main():
    global cfg, best_mae, best_epoch

    model_lib = importlib.import_module("model." + cfg.MODEL.ARCH)
    if cfg.PRUNE.BNS:
        global gOctaveCBR
        gOctaveCBR = model_lib.gOctaveCBR
    if cfg.AUTO.ENABLE:
        layer_config_dir = os.path.join(cfg.DATA.SAVEDIR, cfg.TASK,
                                        'layer_configs')
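        # NOTE: the inner check below repeats the outer cfg.AUTO.ENABLE test,
        # so its else-branch is unreachable as written.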
        if cfg.AUTO.ENABLE:
            predefine_file = os.path.join(layer_config_dir,
                                          "layer_config_0.bin")
            model = model_lib.build_model(predefine=predefine_file)
        else:
            model = model_lib.build_model(predefine=cfg.AUTO.PREDEFINE,
                                          save_path=layer_config_dir)
    else:
        model = model_lib.build_model(basic_split=cfg.MODEL.BASIC_SPLIT)
    model.cuda()

    params, flops = simplesum(model, inputsize=(3, 224, 224), device=0)
    print("basic_split: " + str(cfg.MODEL.BASIC_SPLIT))
    log_string('  + Number of params: %.4fM' % (params / 1e6), display=False)
    log_string('  + Number of FLOPs: %.4fG' % (flops / 1e9), display=False)
    if cfg.FINETUNE.SOLVER.METHOD == 'SGD':
        optimizer = torch.optim.SGD(
            model.parameters(),
            cfg.FINETUNE.SOLVER.LR,
            momentum=cfg.FINETUNE.SOLVER.MOMENTUM,
            weight_decay=cfg.FINETUNE.SOLVER.WEIGHT_DECAY)
    elif cfg.FINETUNE.SOLVER.METHOD == 'Adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=cfg.FINETUNE.SOLVER.LR,
            betas=(0.9, 0.99),
            eps=1e-08,
            weight_decay=cfg.FINETUNE.SOLVER.WEIGHT_DECAY)
    else:
        optimizer = None
        print("WARNING: Method not implmented.")
    if cfg.DATA.PRETRAIN != '':
        model = load_pretrained(model, cfg.DATA.PRETRAIN)
    start_epoch = 0
    check_point_dir = os.path.join(cfg.DATA.SAVEDIR, cfg.TASK, 'checkpoint')
    this_checkpoint = os.path.join(
        check_point_dir, 'checkpoint_epoch{}.pth.tar'.format(args.epoch))
    if os.path.isfile(this_checkpoint):
        log_string("=> loading checkpoint '{}'".format(this_checkpoint))
        checkpoint = torch.load(this_checkpoint)
        from_epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        # optimizer.load_state_dict(checkpoint['optimizer'])
        log_string("=> loaded checkpoint '{}' (epoch {})".format(
            this_checkpoint, checkpoint['epoch']))
    else:
        log_string("=> no checkpoint found at '{}'".format(this_checkpoint))
        exit()

    train_loader, val_loader = prepare_data(cfg.DATA.DIR, cfg.VAL.DIR)
    if cfg.FINETUNE.SOLVER.ADJUST_STEP:
        if cfg.FINETUNE.SOLVER.LR_SCHEDULER == 'step':
            print("step", cfg.FINETUNE.SOLVER.STEPS)
            print(cfg.FINETUNE.SOLVER.LR)
            scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                 cfg.FINETUNE.SOLVER.STEPS,
                                                 gamma=0.1)
        elif cfg.FINETUNE.SOLVER.LR_SCHEDULER == 'cosine':
            max_epoch = cfg.FINETUNE.SOLVER.MAX_EPOCHS
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer,
                                                       max_epoch,
                                                       eta_min=0)
        else:
            raise ValueError("Unsupported scheduler.")

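    # NOTE: layer_config_dir and predefine_file are only defined above when
    # cfg.AUTO.ENABLE is true; this rebuild assumes the AUTO path was taken.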
    model = model_lib.build_model(epoch=from_epoch,
                                  basic_split=cfg.MODEL.BASIC_SPLIT,
                                  model=model,
                                  save_path=layer_config_dir,
                                  predefine=predefine_file,
                                  finetune=True,
                                  finetune_thres=cfg.FINETUNE.THRES,
                                  load_weight='FINETUNE')
    model.cuda()
    start_epoch = 0
    params, flops = simplesum(model, inputsize=(3, 224, 224), device=0)
    print("After finetune: basic_split: " + str(cfg.MODEL.BASIC_SPLIT))
    log_string('  + Number of params: %.4fM' % (params / 1e6), display=False)
    log_string('  + Number of FLOPs: %.4fG' % (flops / 1e9), display=False)
    for epoch in range(start_epoch, cfg.FINETUNE.SOLVER.MAX_EPOCHS):
        if cfg.FINETUNE.SOLVER.ADJUST_STEP:
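            # NOTE: calling scheduler.step() before train() follows the
            # pre-1.1 PyTorch convention; newer releases expect it after the
            # epoch's optimizer updates.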
            scheduler.step()
            lr = scheduler.get_lr()[0]
            log_string("lr: " + str(lr))

        train(train_loader, model, optimizer, epoch)
        mae = val(val_loader, model, epoch)
        if cfg.TEST.ENABLE and (epoch + 1) >= cfg.TEST.BEGIN and (
                epoch + 1) % cfg.TEST.INTERVAL == 0:
            test(model, cfg.TEST.DATASETS, epoch + 1)
        is_best = mae < best_mae
        best_mae = min(mae, best_mae)
        if is_best:
            best_epoch = epoch + 1
        log_string(" epoch: " + str(epoch + 1) + " mae: " + str(mae) +
                   " best_epoch: " + str(best_epoch) + " best_mae: " +
                   str(best_mae))
        val_log_string(" epoch: " + str(epoch + 1) + " mae: " + str(mae) +
                       " best_epoch: " + str(best_epoch) + " best_mae: " +
                       str(best_mae))
        # Save checkpoint
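        # NOTE: finetune_check_point_dir is not defined in this function; it
        # is presumably created at module level alongside check_point_dir.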
        save_file = os.path.join(
            finetune_check_point_dir,
            'checkpoint_epoch{}.pth.tar'.format(epoch + 1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': cfg.MODEL.ARCH,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            filename=save_file)
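Note: save_checkpoint is referenced but never defined in these listings. A minimal sketch, assuming it is a thin wrapper over torch.save (the real project version may also keep a copy of the best model):

import torch

def save_checkpoint(state, filename='checkpoint.pth.tar'):
    # Persist the training state (epoch, arch, state_dict, optimizer).
    torch.save(state, filename)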
Code Example #3
File: test.py  Project: YaoxinShi/SOD100K
def main():
    global cfg
    model_lib = importlib.import_module("model." + cfg.MODEL.ARCH)
    predefine_file = cfg.TEST.MODEL_CONFIG
    model = model_lib.build_model(predefine=predefine_file)
    #model.cuda()
    params, flops = simplesum(model, inputsize=(3, 224, 224), device=0)
    print('  + Number of params: %.4fM' % (params / 1e6))
    print('  + Number of FLOPs: %.4fG' % (flops / 1e9))
    this_checkpoint = cfg.TEST.CHECKPOINT
    if os.path.isfile(this_checkpoint):
        print("=> loading checkpoint '{}'".format(this_checkpoint))
        checkpoint = torch.load(this_checkpoint, map_location=torch.device('cpu'))
        loadepoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            this_checkpoint, checkpoint['epoch']))
        convert_onnx_openvino = False
        if convert_onnx_openvino:
            print(">>> convert pytorch to onnx")
            x = torch.randn(1, 3, 400, 400, requires_grad=True) # Input to the model: (batch, channel, height, width)
            torch.onnx.export(model,             # model being run
                      x,                         # model input (or a tuple for multiple inputs)
                      "out.onnx",                # where to save the model (can be a file or file-like object)
                      export_params=True,        # store the trained parameter weights inside the model file
                      opset_version=10,          # the ONNX version to export the model to
                      do_constant_folding=True,  # whether to execute constant folding for optimization
                      input_names = ['input'],   # the model's input names
                      output_names = ['output'], # the model's output names
                      verbose=False)             # whether to print a human-readable representation of the network
            #### With opset_version=10: pytorch->onnx warns, but onnx->openvino passes.
            # C:\Python36\lib\site-packages\torch\onnx\symbolic_helper.py:243: UserWarning:
            # You are trying to export the model with onnx:Resize for ONNX opset version 10.
            # This operator might cause results to not match the expected results by PyTorch.
            # ONNX's Upsample/Resize operator did not match Pytorch's Interpolation until opset 11.
            # Attributes to determine how to transform the input were added in onnx:Resize in
            # opset 11 to support Pytorch's behavior (like coordinate_transformation_mode and nearest_mode).
            # We recommend using opset 11 and above for models using this operator.
            #### With opset_version=11: pytorch->onnx passes, but onnx->openvino fails.
            # [ ERROR ]  Exception occurred during running replacer "REPLACEMENT_ID" (<class 'extensions.load.onnx.loader.ONNXLoader'>):
            # Unexpected exception happened during extracting attributes for node Resize_51.
            # Original exception message: ONNX Resize operation from opset 11 is not supported.
            #### from https://blog.csdn.net/github_28260175/article/details/105704337
            # F.interpolate(mode=nearest)                       ==> torch.onnx opset10 pass
            # F.interpolate(mode=bilinear, align_corners=False) ==> torch.onnx opset10 warning (may cause inference mismatch)
            # F.interpolate(mode=bilinear, align_corners=True)  ==> torch.onnx opset10 fail
            print(">>> verify onnx model")
            import onnx
            onnx_model = onnx.load("out.onnx")
            onnx.checker.check_model(onnx_model)
            print(">>> convert onnx to OpenVINO")
            os.system(r'python "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\mo.py" --input_model out.onnx --data_type FP16')
            #print(">>> convert onnx to TF")
            #from onnx_tf.backend import prepare
            #tf_rep = prepare(onnx_model, strict=False)
            #tf_rep.export_graph("out.pb")
            #print(">>> convert TF to OpenVINO")
            #os.system(r'python "C:\Program Files (x86)\IntelSWTools\openvino\deployment_tools\model_optimizer\mo.py" --input_model out.pb')
            #   Take care of the version of tensorflow/onnx/onnx-tf
            #   pip install tensorflow==2.2.0
            #   pip install tensorflow-addons==0.11.1
            #   pip install onnx==1.7.0
            #   pip install --user https://github.com/onnx/onnx-tensorflow/archive/master.zip, will get 1.6.0
            #   "C:\Python36\Scripts\onnx-tf.exe convert -i out.onnx -o out.pb"
        else:
            test(model, cfg.TEST.DATASETS, loadepoch)
            eval(cfg.TASK, loadepoch)
    else:
        print("=> no checkpoint found at '{}'".format(this_checkpoint))
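Note: given the opset-10 Resize warning quoted above, the exported graph is worth checking numerically, not just structurally. A minimal sketch, assuming onnxruntime is installed, the model returns a single tensor, and the input name 'input' from the export call:

import numpy as np
import onnxruntime as ort
import torch

def verify_onnx(model, onnx_path="out.onnx", atol=1e-4):
    # Compare PyTorch and ONNX Runtime outputs on one random input.
    model.eval()
    x = torch.randn(1, 3, 400, 400)
    with torch.no_grad():
        torch_out = model(x)  # assumes a single-tensor output
    sess = ort.InferenceSession(onnx_path, providers=['CPUExecutionProvider'])
    onnx_out = sess.run(None, {'input': x.numpy()})[0]
    print("ONNX matches PyTorch:", np.allclose(torch_out.numpy(), onnx_out, atol=atol))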
Code Example #4
def main():
    global cfg, best_mae, best_epoch

    model_lib = importlib.import_module("model." + cfg.MODEL.ARCH)
    if cfg.PRUNE.BNS:
        global gOctaveCBR
        gOctaveCBR = model_lib.gOctaveCBR
    if cfg.AUTO.ENABLE:
        layer_config_dir = os.path.join(cfg.DATA.SAVEDIR, cfg.TASK,
                                        'layer_configs')
        model = model_lib.build_model(basic_split=cfg.MODEL.BASIC_SPLIT,
                                      predefine=cfg.AUTO.PREDEFINE,
                                      save_path=layer_config_dir,
                                      expand=cfg.AUTO.EXPAND)
    else:
        print("Enable AUTO to train CSNet!")
    if cfg.AUTO.FLOPS.ENABLE:
        if cfg.AUTO.FLOPS.EXPAND != -1.0:  # balance the flops between different resolutions.
            log_string(
                "use flops_expand to balance the flops between different resolutions, default=1."
            )
            model.flops_hook(expandflop=cfg.AUTO.FLOPS.EXPAND)
        else:
            model.flops_hook()
        model.set_batchsize(cfg.DATA.BATCH_SIZE)
    model.cuda()
    params, flops = simplesum(model, inputsize=(3, 224, 224), device=0)
    log_string("basic_split: " + str(cfg.MODEL.BASIC_SPLIT))
    log_string('  + Number of params: %.4fM' % (params / 1e6), display=False)
    log_string('  + Number of FLOPs: %.4fG' % (flops / 1e9), display=False)
    if cfg.SOLVER.METHOD == 'Adam_dynamic_weight_decay':
        log_string("Setting wd of bn in ILBlock to 0.")
        normal_parameters = []
        picked_parameters = []
        for pname, p in model.named_parameters():
            # NOTE: the original condition listed 'conv3x3_1.bns' twice; the
            # duplicate clause is dropped here (another layer's bns, e.g.
            # conv3x3_2.bns, may have been intended).
            if 'stage' in pname and (
                    'conv1x1.bns' in pname
                    or 'conv3x3_1.bns' in pname) and 'weight' in pname:
                picked_parameters.append(p)
            else:
                normal_parameters.append(p)
        optimizer = torch.optim.Adam([
            {
                'params': normal_parameters,
                'lr': cfg.SOLVER.LR,
                'weight_decay': cfg.SOLVER.WEIGHT_DECAY
            },
            {
                'params': picked_parameters,
                'lr': cfg.SOLVER.LR,
                'weight_decay': 0.
            },
        ],
                                     lr=cfg.SOLVER.LR,
                                     betas=(0.9, 0.99),
                                     eps=1e-08,
                                     weight_decay=cfg.SOLVER.WEIGHT_DECAY)
    else:
        optimizer = None
        print("WARNING: Method not implmented.")
    if cfg.DATA.PRETRAIN != '':
        model = load_pretrained(model, cfg.DATA.PRETRAIN)
    start_epoch = 0
    if cfg.DATA.RESUME != '':
        if os.path.isfile(cfg.DATA.RESUME):
            log_string("=> loading checkpoint '{}'".format(cfg.DATA.RESUME))
            checkpoint = torch.load(cfg.DATA.RESUME)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            log_string("=> loaded checkpoint '{}' (epoch {})".format(
                cfg.DATA.RESUME, checkpoint['epoch']))
        else:
            log_string("=> no checkpoint found at '{}'".format(
                cfg.DATA.RESUME))

    train_loader, val_loader = prepare_data(cfg.DATA.DIR, cfg.VAL.DIR)
    if cfg.SOLVER.ADJUST_STEP:
        if cfg.SOLVER.LR_SCHEDULER == 'step':
            scheduler = lr_scheduler.MultiStepLR(optimizer,
                                                 cfg.SOLVER.STEPS,
                                                 gamma=0.1)
        else:
            raise ValueError("Unsupported scheduler.")

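    # NOTE: scheduler is only created above when cfg.SOLVER.ADJUST_STEP is
    # set; the FINETUNE.ADJUST_STEP branch below assumes it exists as well.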
    for epoch in range(start_epoch, cfg.SOLVER.MAX_EPOCHS):
        if (cfg.SOLVER.FINETUNE.ADJUST_STEP
                and epoch > cfg.AUTO.FINETUNE) or cfg.SOLVER.ADJUST_STEP:
            scheduler.step()
            lr = scheduler.get_lr()[0]
            log_string("lr: " + str(lr))

        train(train_loader, model, optimizer, epoch)
        mae = val(val_loader, model, epoch)
        is_best = mae < best_mae
        best_mae = min(mae, best_mae)
        if is_best:
            best_epoch = epoch + 1
        log_string(" epoch: " + str(epoch + 1) + " mae: " + str(mae) +
                   " best_epoch: " + str(best_epoch) + " best_mae: " +
                   str(best_mae))
        val_log_string(" epoch: " + str(epoch + 1) + " mae: " + str(mae) +
                       " best_epoch: " + str(best_epoch) + " best_mae: " +
                       str(best_mae))
        # Save checkpoint
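        # NOTE: check_point_dir is not defined in this function; it is
        # presumably created at module level from cfg.DATA.SAVEDIR.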
        save_file = os.path.join(
            check_point_dir, 'checkpoint_epoch{}.pth.tar'.format(epoch + 1))
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': cfg.MODEL.ARCH,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            },
            filename=save_file)
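Note: log_string and val_log_string are project loggers that are not shown here. A minimal sketch of log_string, assuming it appends to a log file and that the display flag controls echoing to stdout (the file path is hypothetical; the project presumably derives it from cfg.DATA.SAVEDIR):

import datetime

LOG_FILE = 'train_log.txt'  # hypothetical path

def log_string(msg, display=True):
    # Append a timestamped line to the log file; optionally echo to stdout.
    line = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ' ' + str(msg)
    with open(LOG_FILE, 'a') as f:
        f.write(line + '\n')
    if display:
        print(line)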