Example #1
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.PRUNING.FINETUNE.LOG_DIR):
        os.makedirs(cfg.PRUNING.FINETUNE.LOG_DIR)
    init_log('global', logging.INFO)
    if cfg.PRUNING.FINETUNE.LOG_DIR:
        add_file_handler(
            'global', os.path.join(cfg.PRUNING.FINETUNE.LOG_DIR, 'logs.txt'),
            logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))

    train_dataloader = build_data_loader()
    model = PruningSiamModel()
    # load the pretrained weights produced by the pruning stage
    logger.info('load pretrain from {}.'.format(
        cfg.PRUNING.FINETUNE.PRETRAIN_PATH))
    model = load_pretrain(model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH)
    logger.info('load pretrain done')
    logger.info('begin to prune the model')
    model = prune_model(model).cuda().train()
    logger.info('pruning finished!')

    optimizer, lr_scheduler = build_optimizer_lr(
        model, cfg.PRUNING.FINETUNE.START_EPOCH)
    if cfg.PRUNING.FINETUNE.RESUME:
        logger.info('resume from {}'.format(cfg.PRUNING.FINETUNE.RESUME_PATH))
        model, optimizer, cfg.PRUNING.FINETUNE.START_EPOCH = restore_from(
            model, optimizer, cfg.PRUNING.FINETUNE.RESUME_PATH)
        logger.info('resume done!')
    train(train_dataloader, model, optimizer, lr_scheduler)
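For context, a minimal sketch of what a load_pretrain-style helper typically does in SiamRPN-style repos; the function name and the 'module.' prefix handling are assumptions for illustration, not taken from this codebase:

import torch

def load_pretrain_sketch(model, path):
    # load the checkpoint onto CPU and unwrap the common container format
    ckpt = torch.load(path, map_location='cpu')
    state = ckpt.get('state_dict', ckpt)
    # strip a 'module.' prefix left over from (Distributed)DataParallel
    state = {k[len('module.'):] if k.startswith('module.') else k: v
             for k, v in state.items()}
    # strict=False tolerates keys that exist in only one of the two models
    model.load_state_dict(state, strict=False)
    return model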
Example #2
def main():
    rank, world_size = dist_init()
    cfg.merge_from_file(args.cfg)
    if rank == 0:
        if not os.path.exists(cfg.TRAIN.LOG_DIR):
            os.makedirs(cfg.TRAIN.LOG_DIR)
        init_log('global', logging.INFO)
        if cfg.TRAIN.LOG_DIR:
            add_file_handler('global',
                             os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
                             logging.INFO)
        logger.info("Version Information: \n{}\n".format(commit()))
        logger.info("config \n{}".format(json.dumps(cfg, indent=4)))

    logger.info('dist init done!')
    train_dataloader = build_data_loader()
    model = get_model('BaseSiamModel').cuda().train()
    dist_model = DistModule(model)
    optimizer, lr_scheduler = build_optimizer_lr(dist_model.module,
                                                 cfg.TRAIN.START_EPOCH)
    if cfg.TRAIN.BACKBONE_PRETRAIN:
        logger.info('load backbone from {}.'.format(cfg.TRAIN.BACKBONE_PATH))
        model.backbone = load_pretrain(model.backbone, cfg.TRAIN.BACKBONE_PATH)
        logger.info('load backbone done!')
    if cfg.TRAIN.RESUME:
        logger.info('resume from {}'.format(cfg.TRAIN.RESUME_PATH))
        model, optimizer, cfg.TRAIN.START_EPOCH = restore_from(
            model, optimizer, cfg.TRAIN.RESUME_PATH)
        logger.info('resume done!')
    elif cfg.TRAIN.PRETRAIN:
        logger.info('load pretrain from {}.'.format(cfg.TRAIN.PRETRAIN_PATH))
        model = load_pretrain(model, cfg.TRAIN.PRETRAIN_PATH)
        logger.info('load pretrain done')
    dist_model = DistModule(model)  # re-wrap after the weights have been loaded/restored
    train(train_dataloader, dist_model, optimizer, lr_scheduler)
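dist_init and DistModule are repo helpers; a rough equivalent using plain torch.distributed is sketched below (the nccl backend and env:// initialization are assumptions):

import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def dist_init_sketch():
    # expects RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT in the environment
    dist.init_process_group(backend='nccl', init_method='env://')
    return dist.get_rank(), dist.get_world_size()

# wrapping plays the role of DistModule: gradients are all-reduced
# across ranks during backward
# dist_model = DDP(model.cuda())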
Example #3
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.META.LOG_DIR):
        os.makedirs(cfg.META.LOG_DIR)
    init_log("global", logging.INFO)
    if cfg.META.LOG_DIR:
        add_file_handler("global", os.path.join(cfg.META.LOG_DIR, "logs.txt"),
                         logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = MetaSiamModel().cuda()
    model = load_pretrain(model, cfg.META.PRETRAIN_PATH)
    # initialize meta-training
    model.meta_train_init()
    # parameters to optimize
    optimizer = build_optimizer(model)
    dataloader = build_dataloader()
    meta_train(dataloader, optimizer, model)
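build_optimizer is repo-specific; the usual pattern behind a comment like "parameters to optimize" is to hand the optimizer only the trainable subset, roughly as below (the function name and SGD hyperparameters are placeholders):

import torch

def build_optimizer_sketch(model, lr=1e-3):
    # pass only parameters with requires_grad=True to the optimizer
    trainable = [p for p in model.parameters() if p.requires_grad]
    return torch.optim.SGD(trainable, lr=lr, momentum=0.9, weight_decay=1e-4)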
Example #4
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.GRAD.LOG_DIR):
        os.makedirs(cfg.GRAD.LOG_DIR)
    init_log("global", logging.INFO)
    if cfg.GRAD.LOG_DIR:
        add_file_handler("global", os.path.join(cfg.GRAD.LOG_DIR, "logs.txt"),
                         logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = get_model('GradSiamModel').cuda()
    model = load_pretrain(model, cfg.GRAD.PRETRAIN_PATH)
    # parameters to optimize
    optimizer = build_optimizer(model)
    dataloader = build_dataloader()
    if cfg.GRAD.RESUME:
        logger.info('resume from {}'.format(cfg.GRAD.RESUME_PATH))
        model, optimizer, cfg.GRAD.START_EPOCH = restore_from(
            model, optimizer, cfg.GRAD.RESUME_PATH)
        logger.info('resume done!')
    model.freeze_model()
    train(dataloader, optimizer, model)
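model.freeze_model() is defined elsewhere in the repo; freezing usually amounts to the sketch below (an assumption about what the method does). Note that the example above builds the optimizer before calling freeze_model(), so whether frozen parameters are excluded from updates depends on how build_optimizer filters them.

def freeze_sketch(module):
    # stop gradient updates and fix BatchNorm running statistics
    for p in module.parameters():
        p.requires_grad = False
    module.eval()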
Example #5
def main():
    seed_torch(123456)
    cfg.merge_from_file(args.cfg)
    init_log('global', logging.INFO)

    base_model = get_model(cfg.MODEL_ARC)
    base_model = load_pretrain(base_model, args.snapshot).cuda().eval()
    # if you want to test the pruned model:
    # base_model = prune_model(base_model).cuda().eval()  # refine the model

    # if you want to test real pruning:
    # base_model = get_model(cfg.MODEL_ARC)
    # base_model = load_pretrain(base_model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH)  # load the mask
    # base_model = prune_model(base_model)  # refine the model
    # base_model = load_pretrain(base_model, args.snapshot).cuda().eval()  # load the fine-tuned weights

    tracker = get_tracker(args.tracker, base_model)
    data_dir = os.path.join(cfg.TRACK.DATA_DIR, args.dataset)
    dataset = get_dataset(args.dataset, data_dir)
    if args.dataset in ['VOT2016', 'VOT2018']:
        vot_evaluate(dataset, tracker)
    elif args.dataset == 'GOT-10k':
        ope_evaluate(dataset, tracker)
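seed_torch(123456) is a repo helper; a common implementation seeds every RNG in play (the cudnn flags are an assumption and trade speed for reproducibility):

import os
import random

import numpy as np
import torch

def seed_torch_sketch(seed):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False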
Example #6
            prune_conv(block, prune_mask, last_mask)
    return model


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg',
                        default='',
                        type=str,
                        help='which config file to use')
    parser.add_argument('--snapshot',
                        default='',
                        type=str,
                        help='which model to prune')
    args = parser.parse_args()
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.PRUNING.LOG_DIR):
        os.makedirs(cfg.PRUNING.LOG_DIR)
    init_log('global', logging.INFO)
    if cfg.PRUNING.LOG_DIR:
        add_file_handler('global', os.path.join(cfg.PRUNING.LOG_DIR,
                                                'logs.txt'), logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = PruningSiamModel()
    model = load_pretrain(model, args.snapshot)

    for k, v in model.mask.items():
        print(k, v)
    model = prune_model(model)
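prune_conv is defined earlier in this file (the call above sits inside prune_model). The core idea of mask-driven channel pruning is sketched below for an ordinary (non-grouped) convolution; prune_conv_sketch is a hypothetical name, not the repo's implementation:

import torch
import torch.nn as nn

def prune_conv_sketch(conv, out_mask, in_mask):
    # keep only the channels whose mask entry is nonzero
    out_idx = torch.nonzero(out_mask, as_tuple=False).flatten()
    in_idx = torch.nonzero(in_mask, as_tuple=False).flatten()
    new_conv = nn.Conv2d(len(in_idx), len(out_idx), conv.kernel_size,
                         conv.stride, conv.padding,
                         bias=conv.bias is not None)
    # slice the surviving filters out of the original weight tensor
    new_conv.weight.data = conv.weight.data[out_idx][:, in_idx].clone()
    if conv.bias is not None:
        new_conv.bias.data = conv.bias.data[out_idx].clone()
    return new_conv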
Example #7
    # torch.onnx.export(model,
    #                   (e0, search),
    #                   "pretrained_models/siamrpn_alex_search.onnx",
    #                   verbose=True,
    #                   input_names=['e0', 'search'],
    #                   output_names=['cls', 'loc'])
    # examplar convert
    # torch.onnx.export(model,
    #                   examplar,
    #                   'pretrained_models/siamrpn_alex_examplar.onnx',
    #                   verbose=True,
    #                   input_names=['examplar'],
    #                   output_names=['e0'])

    cfg.merge_from_file('configs/mobilenetv2_pruning.yaml')
    pretrained_path = './snapshot/mobilenetv2_sfp_0_75_new/checkpoint_e2.pth'
    model = get_model('PruningSiamModel')
    model = load_pretrain(model, pretrained_path)  # load the mask
    model = prune_model(model).cuda().eval()  # refine the model
    examplar = torch.randn(50, 3, 127, 127, device='cuda')
    search = torch.randn(50, 3, 255, 255, device='cuda')
    torch.onnx.export(model, (examplar, search),
                      'pretrained_models/siamrpn_mobi_pruning.onnx',
                      verbose=True,
                      input_names=['examplar', 'search'],
                      output_names=['cls', 'loc'])
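    # Optional sanity check (not part of the original script): run the
    # exported graph with onnxruntime and inspect the output shapes.
    # Assumes onnxruntime is installed; the feed names must match the
    # input_names passed to torch.onnx.export above.
    # import onnxruntime as ort
    # sess = ort.InferenceSession('pretrained_models/siamrpn_mobi_pruning.onnx')
    # cls, loc = sess.run(None, {'examplar': examplar.cpu().numpy(),
    #                            'search': search.cpu().numpy()})
    # print(cls.shape, loc.shape)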

    # test
    # torch.onnx.export(model,
    #                   examplar,
Example #8
                key_prefix = key.split('.')[:-1]
                # the BN layer that follows this conv lives at a fixed
                # submodule offset (+4) in this backbone's layout
                key_prefix[-1] = str(int(key_prefix[-1]) + 4)
                key_prefix = '.'.join(key_prefix)
                self._apply_bn_mask(model_params, key_prefix, mask)

    def _apply_bn_mask(self, model_params, key_prefix, mask):
        # zero the pruned channels in every BatchNorm tensor
        for suffix in ('.weight', '.bias', '.running_mean', '.running_var'):
            new_k = key_prefix + suffix
            if new_k in model_params.keys():
                model_params[new_k].data.mul_(mask)

    def _apply_deepwise_mask(self, model_params, key_prefix, mask):
        # 'deepwise' refers to a depthwise conv; broadcast the channel
        # mask over the weight's output-channel dimension
        new_k = key_prefix + '.weight'
        if new_k in model_params.keys():
            model_params[new_k].data.mul_(mask[:, None, None, None])


if __name__ == '__main__':
    cfg.merge_from_file('../configs/mobilenetv2_finetune.yaml')
    model = PruningSiamModel()
    for k, v in model.mask.items():
        print(k, v.size())
Example #9
    except OSError as err:
        print(err)

    with open(result_path, 'w') as f:
        f.write('Occ')
    return False


if __name__ == '__main__':
    num_search = len(args.penalty_k) \
                 * len(args.window_influence) \
                 * len(args.lr) \
                 * len(args.search_region)
    print("Total search number: {}".format(num_search))

    cfg.merge_from_file(args.config)

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    # note: dataset_root is built here but unused; data_dir below is what is used
    dataset_root = os.path.join(cur_dir, '../testing_dataset', args.dataset)

    # create dataset
    data_dir = os.path.join(cfg.TRACK.DATA_DIR, args.dataset)
    dataset = get_dataset(args.dataset, data_dir)

    # create model
    model = get_model(cfg.MODEL_ARC)

    # load model
    model = load_pretrain(model, args.snapshot).cuda().eval()

    # build tracker