Example #1
def main():
    rank, world_size = dist_init()
    cfg.merge_from_file(args.cfg)
    if rank == 0:
        if not os.path.exists(cfg.TRAIN.LOG_DIR):
            os.makedirs(cfg.TRAIN.LOG_DIR)
        init_log('global', logging.INFO)
        if cfg.TRAIN.LOG_DIR:
            add_file_handler('global',
                             os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
                             logging.INFO)
        logger.info("Version Information: \n{}\n".format(commit()))
        logger.info("config \n{}".format(json.dumps(cfg, indent=4)))

    logger.info('dist init done!')
    train_dataloader = build_data_loader()
    model = get_model('BaseSiamModel').cuda().train()
    dist_model = DistModule(model)
    optimizer, lr_scheduler = build_optimizer_lr(dist_model.module,
                                                 cfg.TRAIN.START_EPOCH)
    if cfg.TRAIN.BACKBONE_PRETRAIN:
        logger.info('load backbone from {}.'.format(cfg.TRAIN.BACKBONE_PATH))
        model.backbone = load_pretrain(model.backbone, cfg.TRAIN.BACKBONE_PATH)
        logger.info('load backbone done!')
    if cfg.TRAIN.RESUME:
        logger.info('resume from {}'.format(cfg.TRAIN.RESUME_PATH))
        model, optimizer, cfg.TRAIN.START_EPOCH = restore_from(
            model, optimizer, cfg.TRAIN.RESUME_PATH)
        logger.info('resume done!')
    elif cfg.TRAIN.PRETRAIN:
        logger.info('load pretrain from {}.'.format(cfg.TRAIN.PRETRAIN_PATH))
        model = load_pretrain(model, cfg.TRAIN.PRETRAIN_PATH)
        logger.info('load pretrain done')
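    # re-create the distributed wrapper now that the (possibly restored) weights are in place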
    dist_model = DistModule(model)
    train(train_dataloader, dist_model, optimizer, lr_scheduler)
Example #2
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.PRUNING.FINETUNE.LOG_DIR):
        os.makedirs(cfg.PRUNING.FINETUNE.LOG_DIR)
    init_log('global', logging.INFO)
    if cfg.PRUNING.FINETUNE.LOG_DIR:
        add_file_handler(
            'global', os.path.join(cfg.PRUNING.FINETUNE.LOG_DIR, 'logs.txt'),
            logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))

    train_dataloader = build_data_loader()
    model = PruningSiamModel()
    # load the weights (and pruning masks) saved by the pruning stage
    logger.info('load pretrain from {}.'.format(
        cfg.PRUNING.FINETUNE.PRETRAIN_PATH))
    model = load_pretrain(model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH)
    logger.info('load pretrain done')
    logger.info('begin to prune the model')
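    # prune_model refines the network using the masks loaded above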
    model = prune_model(model).cuda().train()
    logger.info('pruning finished!')

    optimizer, lr_scheduler = build_optimizer_lr(
        model, cfg.PRUNING.FINETUNE.START_EPOCH)
    if cfg.PRUNING.FINETUNE.RESUME:
        logger.info('resume from {}'.format(cfg.PRUNING.FINETUNE.RESUME_PATH))
        model, optimizer, cfg.PRUNING.FINETUNE.START_EPOCH = restore_from(
            model, optimizer, cfg.PRUNING.FINETUNE.RESUME_PATH)
        logger.info('resume done!')
    train(train_dataloader, model, optimizer, lr_scheduler)
Example #3
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.META.LOG_DIR):
        os.makedirs(cfg.META.LOG_DIR)
    init_log("global", logging.INFO)
    if cfg.META.LOG_DIR:
        add_file_handler("global", os.path.join(cfg.META.LOG_DIR, "logs.txt"),
                         logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = MetaSiamModel().cuda()
    model = load_pretrain(model, cfg.META.PRETRAIN_PATH)
    # init meta train
    model.meta_train_init()
    # parameters to optimize
    optimizer = build_optimizer(model)
    dataloader = build_dataloader()
    meta_train(dataloader, optimizer, model)
Example #4
def main():
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.GRAD.LOG_DIR):
        os.makedirs(cfg.GRAD.LOG_DIR)
    init_log("global", logging.INFO)
    if cfg.GRAD.LOG_DIR:
        add_file_handler("global", os.path.join(cfg.GRAD.LOG_DIR, "logs.txt"),
                         logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = get_model('GradSiamModel').cuda()
    model = load_pretrain(model, cfg.GRAD.PRETRAIN_PATH)
    # parameters to optimize
    optimizer = build_optimizer(model)
    dataloader = build_dataloader()
    if cfg.GRAD.RESUME:
        logger.info('resume from {}'.format(cfg.GRAD.RESUME_PATH))
        model, optimizer, cfg.GRAD.START_EPOCH = restore_from(
            model, optimizer, cfg.GRAD.RESUME_PATH)
        logger.info('resume done!')
    model.freeze_model()
    train(dataloader, optimizer, model)
Example #5
def main():
    seed_torch(123456)
    cfg.merge_from_file(args.cfg)
    init_log('global', logging.INFO)

    base_model = get_model(cfg.MODEL_ARC)
    base_model = load_pretrain(base_model, args.snapshot).cuda().eval()
    # # to test the pruned model:
    # base_model = prune_model(base_model).cuda().eval()  # refine the model

    # to test real pruning:
    # base_model = get_model(cfg.MODEL_ARC)
    # base_model = load_pretrain(base_model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH)  # load the mask
    # base_model = prune_model(base_model)  # refine the model
    # base_model = load_pretrain(base_model, args.snapshot).cuda().eval()  # load the finetuned weights

    tracker = get_tracker(args.tracker, base_model)
    data_dir = os.path.join(cfg.TRACK.DATA_DIR, args.dataset)
    dataset = get_dataset(args.dataset, data_dir)
    if args.dataset in ['VOT2016', 'VOT2018']:
        vot_evaluate(dataset, tracker)
    elif args.dataset == 'GOT-10k':
        ope_evaluate(dataset, tracker)
Example #6

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg',
                        default='',
                        type=str,
                        help='which config file to use')
    parser.add_argument('--snapshot',
                        default='',
                        type=str,
                        help='which model to prune')
    args = parser.parse_args()
    cfg.merge_from_file(args.cfg)
    if not os.path.exists(cfg.PRUNING.LOG_DIR):
        os.makedirs(cfg.PRUNING.LOG_DIR)
    init_log('global', logging.INFO)
    if cfg.PRUNING.LOG_DIR:
        add_file_handler('global', os.path.join(cfg.PRUNING.LOG_DIR,
                                                'logs.txt'), logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    model = PruningSiamModel()
    model = load_pretrain(model, args.snapshot)

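    # print the learned pruning masks for inspection before refining the model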
    for k, v in model.mask.items():
        print(k, v)
    model = prune_model(model)

    # torch.save(model.state_dict(), './snapshot/mobilenetv2_gdp/model_pruning.pth')
Example #7
    #                   verbose=True,
    #                   input_names=['e0','search'],
    #                   output_names=['cls', 'loc'])
    # # examplar convert
    # # torch.onnx.export(model,
    # #                   examplar,
    # #                   'pretrained_models/siamrpn_alex_examplar.onnx',
    # #                   verbose=True,
    # #                   input_names=['examplar'],
    # #                   output_names=['e0'])
    #

    cfg.merge_from_file('configs/mobilenetv2_pruning.yaml')
    pretrained_path = './snapshot/mobilenetv2_sfp_0_75_new/checkpoint_e2.pth'
    model = get_model('PruningSiamModel')
    model = load_pretrain(model, pretrained_path)  # load the mask
    model = prune_model(model).cuda().eval()  # refine the model
    examplar = torch.randn(50, 3, 127, 127, device='cuda')
    search = torch.randn(50, 3, 255, 255, device='cuda')
    torch.onnx.export(model, (examplar, search),
                      'pretrained_models/siamrpn_mobi_pruning.onnx',
                      verbose=True,
                      input_names=['examplar', 'search'],
                      output_names=['cls', 'loc'])

    # test
    # torch.onnx.export(model,
    #                   examplar,
    #                   'pretrained_models/siamrpn_mobi_pruning_examplar_test.onnx',
    #                   verbose=True,
    #                   input_names=['examplar'],
Example #8
    print("Total search number: {}".format(num_search))

    cfg.merge_from_file(args.config)

    cur_dir = os.path.dirname(os.path.realpath(__file__))
    dataset_root = os.path.join(cur_dir, '../testing_dataset', args.dataset)

    # create dataset
    data_dir = os.path.join(cfg.TRACK.DATA_DIR, args.dataset)
    dataset = get_dataset(args.dataset, data_dir)

    # create model
    model = get_model(cfg.MODEL_ARC)

    # load model
    model = load_pretrain(model, args.snapshot).cuda().eval()

    # build tracker
    tracker_name = 'SiamRPN'
    tracker = get_tracker(tracker_name, model)

    backbone_name = args.snapshot.split('/')[-2]
    snapshot_name = args.snapshot.split('/')[-1].split('.')[0]
    benchmark_path = os.path.join('hp_search_result', args.dataset)
    seqs = list(range(len(dataset)))
    np.random.shuffle(seqs)
    for idx in seqs:
        video = dataset[idx]
        video.read_imgs()
        # load image
        np.random.shuffle(args.penalty_k)
Example #9
def main(args):
    cfg_from_file(args.config)
    cfg.save_name = args.save_name
    cfg.save_path = args.save_path
    cfg.resume_file = args.resume_file
    cfg.config = args.config
    cfg.batch_size = args.batch_size
    cfg.num_workers = args.num_workers
    save_path = join(args.save_path, args.save_name)
    if not exists(save_path):
        makedirs(save_path)
    resume_file = args.resume_file
    init_log('global', logging.INFO)
    add_file_handler('global', os.path.join(save_path, 'logs.txt'),
                     logging.INFO)
    logger.info("Version Information: \n{}\n".format(commit()))
    logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
    start_epoch = 0

    model = ModelBuilder().cuda()
    if cfg.backbone.pretrained:
        load_pretrain(model.backbone,
                      join('pretrained_net', cfg.backbone.pretrained))

    train_dataset = Datasets()
    val_dataset = Datasets(is_train=False)

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.num_workers,
                                               pin_memory=False,
                                               drop_last=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.num_workers,
                                             pin_memory=False,
                                             drop_last=True)

    if resume_file:
        if isfile(resume_file):
            logger.info("=> loading checkpoint '{}'".format(resume_file))
            model, start_epoch = restore_from(model, resume_file)
            start_epoch = start_epoch + 1
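            # replay the per-epoch shuffles so the resumed run sees the same data order as the original run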
            for i in range(start_epoch):
                train_loader.dataset.shuffle()
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                resume_file, start_epoch - 1))
        else:
            logger.info("=> no checkpoint found at '{}'".format(resume_file))

    ngpus = torch.cuda.device_count()
    is_dataparallel = False
    if ngpus > 1:
        model = torch.nn.DataParallel(model, list(range(ngpus))).cuda()
        is_dataparallel = True

    if is_dataparallel:
        optimizer, lr_scheduler = build_opt_lr(model.module, start_epoch)
    else:
        optimizer, lr_scheduler = build_opt_lr(model, start_epoch)

    logger.info(lr_scheduler)
    logger.info("model prepare done")

    if args.log:
        writer = SummaryWriter(comment=args.save_name)

    for epoch in range(start_epoch, cfg.train.epoch):
        train_loader.dataset.shuffle()
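        # rebuild the optimizer and LR scheduler when backbone layers are unfixed or the pretrain phase ends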
        if epoch in cfg.backbone.unfix_steps or epoch == cfg.train.pretrain_epoch:
            if is_dataparallel:
                optimizer, lr_scheduler = build_opt_lr(model.module, epoch)
            else:
                optimizer, lr_scheduler = build_opt_lr(model, epoch)
        lr_scheduler.step(epoch)
        record_dict_train = train(train_loader, model, optimizer, epoch)
        record_dict_val = validate(val_loader, model, epoch)
        message = 'Train Epoch: [{0}]\t'.format(epoch)
        for k, v in record_dict_train.items():
            message += '{name:s} {loss:.4f}\t'.format(name=k, loss=v)
        logger.info(message)
        message = 'Val Epoch: [{0}]\t'.format(epoch)
        for k, v in record_dict_val.items():
            message += '{name:s} {loss:.4f}\t'.format(name=k, loss=v)
        logger.info(message)

        if args.log:
            for k, v in record_dict_train.items():
                writer.add_scalar('train/' + k, v, epoch)
            for k, v in record_dict_val.items():
                writer.add_scalar('val/' + k, v, epoch)
        if is_dataparallel:
            save_checkpoint(
                {
                    'epoch': epoch,
                    'state_dict': model.module.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'cfg': cfg
                }, epoch, save_path)
        else:
            save_checkpoint(
                {
                    'epoch': epoch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'cfg': cfg
                }, epoch, save_path)
Example #10
    cfg.TRAIN.BATCH_SIZE = 1
    cfg.TRAIN.OUTPUT_SIZE = 25
    quan_dataset_dir = '/home/keyan/NewDisk/ZhangXiong/quant_dataset'
    if not os.path.isdir(quan_dataset_dir):
        os.makedirs(quan_dataset_dir)
    train_dataset = TrainDataset()

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=cfg.TRAIN.BATCH_SIZE,
                                  num_workers=cfg.TRAIN.NUM_WORKERS,
                                  pin_memory=True
                                  )
    train_dataloader.dataset.shuffle()

    base_model = get_model('PruningSiamModel')
    base_model = load_pretrain(base_model, cfg.PRUNING.FINETUNE.PRETRAIN_PATH) # load the mask
    base_model = prune_model(base_model).cuda() # refine the model
    base_model.eval()

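    # iterate over the first 5000 training samples, moving each batch to the GPU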
    for idx, data in enumerate(train_dataloader):
        print(idx)
        if idx > 5000:
            break
        examplar_img = data['examplar_img'].cuda()
        search_img = data['search_img'].cuda()
        bbox = data['bbox'].cpu().numpy()
        gt_cls = data['gt_cls'].cuda()
        gt_delta = data['gt_delta'].cuda()
        gt_delta_weight = data['delta_weight'].cuda()

        # np.set_printoptions(threshold=np.inf)