Example #1
    def __init__(self,
                 weights_path='./models/reid300.pt',
                 config_path='./models/reid300.yaml'):

        if not os.path.exists(config_path):
            raise ValueError("Invalid config path `" +
                             os.path.abspath(config_path) + "`")
        if not os.path.exists(weights_path):
            raise ValueError("Invalid weight path `" +
                             os.path.abspath(weights_path) + "`")

        cfg = get_default_config()
        cfg.merge_from_file(config_path)

        self.model = build_model(
            name=cfg.model.name,
            num_classes=1000,
            loss='am_softmax',
            feature_dim=cfg.model.feature_dim,
            fpn_cfg=cfg.model.fpn,
            pooling_type=cfg.model.pooling_type,
            input_size=(256, 128),
            IN_first=cfg.model.IN_first,
        )
        load_pretrained_weights(self.model, weights_path)
        self.model = self.model.cuda()
        self.model.eval()
        self.preprocess_mean = (torch.tensor([0.485, 0.456, 0.406]) *
                                255.0).cuda()
        self.preprocess_std = (torch.tensor([0.229, 0.224, 0.225]) *
                               255.0).cuda()
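The snippet above stops after caching the normalization constants. Below is a hedged sketch of an inference helper such a wrapper might pair with; the `embed` name and the uint8 NCHW input layout are assumptions, not part of the original example.

    def embed(self, batch):
        # Sketch only: `batch` is assumed to be a uint8 NCHW tensor of person
        # crops already resized to the 256x128 input size configured above.
        with torch.no_grad():
            x = batch.cuda().float()
            x = (x - self.preprocess_mean.view(1, 3, 1, 1)) / \
                self.preprocess_std.view(1, 3, 1, 1)
            return self.model(x)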
Example #2
def make_nncf_changes_in_training(model, cfg, classes, command_line_cfg_opts):
    if cfg.train.ema.enable:
        raise RuntimeError('EMA model cannot be used together with NNCF compression')
    if cfg.lr_finder.enable:
        raise RuntimeError('LR finder cannot be used together with NNCF compression')

    if cfg.model.resume:
        raise NotImplementedError('Resuming NNCF training is not implemented yet')
    if not cfg.model.load_weights:
        raise RuntimeError('NNCF training should be started from a pre-trained model')
    checkpoint_path = cfg.model.load_weights
    checkpoint_dict = load_checkpoint(checkpoint_path, map_location='cpu')
    if is_nncf_state(checkpoint_dict):
        raise RuntimeError(f'The checkpoint at {checkpoint_path} is already an NNCF checkpoint')

    logger.info(f'Loading weights from {checkpoint_path}')
    load_pretrained_weights(model, pretrained_dict=checkpoint_dict)
    datamanager_for_init = build_datamanager(cfg, classes)

    compression_ctrl, model, nncf_metainfo = \
        wrap_nncf_model(model, cfg, datamanager_for_init=datamanager_for_init)
    logger.info(f'Received from wrapping nncf_metainfo =\n{pformat(nncf_metainfo)}')

    # calculating initial LR for NNCF training
    lr = None
    initial_lr_from_checkpoint = checkpoint_dict.get('initial_lr')
    is_initial_lr_set_from_opts = is_config_parameter_set_from_command_line(command_line_cfg_opts, 'train.lr')
    lr = calculate_lr_for_nncf_training(cfg, initial_lr_from_checkpoint,
                                        is_initial_lr_set_from_opts)
    assert lr is not None
    cfg.train.lr = lr
    return compression_ctrl, model, cfg, lr, nncf_metainfo
Example #3
def main():
    # 1. Get input arguments
    args = get_args()

    # 2. Create config instance from args above
    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    # 3. Create DataManager Instance
    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler)

    print('Building {}-engine for {}-reid'.format(cfg.loss.name,
                                                  cfg.data.type))

    # Build engine and run
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
Example #4
def init_pretrained_weights(model, key='mobilenetv3', **kwargs):
    """Initializes model with pretrained weights.
    """
    from torchreid.utils import load_pretrained_weights

    link_to_weights = pretrained_urls[key]
    load_pretrained_weights(model, link_to_weights, key, **kwargs)
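For context, a possible call site for the helper above; this is a sketch only, and both the builder call and the model name are placeholders rather than code from the original.

model = build_model(name='mobilenetv3', num_classes=1000)  # hypothetical builder call
init_pretrained_weights(model, key='mobilenetv3')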
Example #5
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', '-c', type=str, required=True)
    parser.add_argument('--root', '-r', type=str, required=True)
    parser.add_argument('--save-dir', type=str, default='log')
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config_file)
    assert osp.exists(args.root)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    data_loader, num_pids = prepare_data(cfg, mode='gallery')

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, num_pids))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = model.cuda()

    visualize_activation_map(model, data_loader, args.save_dir, cfg.data.width,
                             cfg.data.height, cfg.use_gpu)
Example #6
    def _load_model(self,
                    model: ModelEntity,
                    device: torch.device,
                    pretrained_dict: Optional[Dict] = None):
        if model is not None:
            # If a model has been trained and saved for the task already, create empty model and load weights here
            if pretrained_dict is None:
                buffer = io.BytesIO(model.get_data("weights.pth"))
                model_data = torch.load(buffer,
                                        map_location=torch.device('cpu'))
            else:
                model_data = pretrained_dict

            model = self._create_model(self._cfg, from_scratch=True)

            try:
                load_pretrained_weights(model, pretrained_dict=model_data)
                logger.info("Loaded model weights from Task Environment")
            except BaseException as ex:
                raise ValueError("Could not load the saved model. The model file structure is invalid.") \
                    from ex
        else:
            # If there is no trained model yet, create model with pretrained weights as defined in the model config
            # file.
            model = self._create_model(self._cfg, from_scratch=False)
            logger.info(
                "No trained model in project yet. Created new model with general-purpose pretrained weights."
            )
        return model.to(device)
Example #7
def build_auxiliary_model(config_file,
                          num_classes,
                          use_gpu,
                          device_ids,
                          num_iter,
                          lr=None,
                          nncf_aux_config_changes=None,
                          aux_config_opts=None,
                          aux_pretrained_dict=None):
    aux_cfg = get_default_config()
    aux_cfg.use_gpu = use_gpu
    merge_from_files_with_base(aux_cfg, config_file)
    if nncf_aux_config_changes:
        print(
            f'applying changes to aux config from NNCF aux config {nncf_aux_config_changes}'
        )
        if not isinstance(nncf_aux_config_changes, CfgNode):
            nncf_aux_config_changes = CfgNode(nncf_aux_config_changes)
        aux_cfg.merge_from_other_cfg(nncf_aux_config_changes)
    if aux_config_opts:
        print(f'applying changes to aux config from command line arguments, '
              f'the changes are:\n{pformat(aux_config_opts)}')
        aux_cfg.merge_from_list(aux_config_opts)

    print(f'\nShow auxiliary configuration\n{aux_cfg}\n')

    if lr is not None:
        aux_cfg.train.lr = lr
        print(f"setting learning rate from main model: {lr}")
    model = torchreid.models.build_model(**model_kwargs(aux_cfg, num_classes))
    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(aux_cfg))
    scheduler = torchreid.optim.build_lr_scheduler(
        optimizer=optimizer, num_iter=num_iter, **lr_scheduler_kwargs(aux_cfg))

    if aux_cfg.model.resume and check_isfile(aux_cfg.model.resume):
        aux_cfg.train.start_epoch = resume_from_checkpoint(
            aux_cfg.model.resume,
            model,
            optimizer=optimizer,
            scheduler=scheduler)

    elif aux_pretrained_dict is not None:
        load_pretrained_weights(model, pretrained_dict=aux_pretrained_dict)

    elif aux_cfg.model.load_weights and check_isfile(
            aux_cfg.model.load_weights):
        load_pretrained_weights(model, aux_cfg.model.load_weights)

    if aux_cfg.use_gpu:
        assert device_ids is not None

        if len(device_ids) > 1:
            model = DataParallel(model, device_ids=device_ids,
                                 output_device=0).cuda(device_ids[0])
        else:
            model = model.cuda(device_ids[0])

    return model, optimizer, scheduler
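A hedged example of calling the builder above; every argument value here is a placeholder chosen for illustration, not taken from the original code.

aux_model, aux_optimizer, aux_scheduler = build_auxiliary_model(
    config_file='configs/aux_model.yaml',      # hypothetical config path
    num_classes=1000,
    use_gpu=torch.cuda.is_available(),
    device_ids=[0],
    num_iter=100)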
Example #8
def main():
    global args

    set_random_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = torch.cuda.is_available() and not args.use_cpu
    log_name = 'test.log' if args.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('** Arguments **')
    arg_keys = list(args.__dict__.keys())
    arg_keys.sort()
    for key in arg_keys:
        print('{}: {}'.format(key, args.__dict__[key]))
    print('\n')
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))
    if use_gpu:
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    datamanager = build_datamanager(args)

    print('Building model: {}'.format(args.arch))
    model = torchreid.models.build_model(
        name=args.arch,
        num_classes=datamanager.num_train_pids,
        loss=args.loss.lower(),
        pretrained=(not args.no_pretrained),
        use_gpu=use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(args))

    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(args))

    if args.resume and check_isfile(args.resume):
        args.start_epoch = resume_from_checkpoint(args.resume,
                                                  model,
                                                  optimizer=optimizer)

    print('Building {}-engine for {}-reid'.format(args.loss, args.app))
    engine = build_engine(args, datamanager, model, optimizer, scheduler)

    engine.run(**engine_run_kwargs(args))
Example #9
def main():

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument('--output-name', type=str, default='model')
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='Verbose mode for onnx.export')
    args = parser.parse_args()

    cfg = get_default_config()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.freeze()

    model = build_model(
        name=cfg.model.name,
        num_classes=1041,  # Does not matter in conversion
        loss=cfg.loss.name,
        pretrained=False,
        use_gpu=True,
        feature_dim=cfg.model.feature_dim,
        fpn=cfg.model.fpn,
        fpn_dim=cfg.model.fpn_dim,
        gap_as_conv=cfg.model.gap_as_conv,
        input_size=(cfg.data.height, cfg.data.width),
        IN_first=cfg.model.IN_first)

    load_pretrained_weights(model, cfg.model.load_weights)
    model.eval()

    _, transform = build_transforms(cfg.data.height,
                                    cfg.data.width,
                                    transforms=cfg.data.transforms,
                                    norm_mean=cfg.data.norm_mean,
                                    norm_std=cfg.data.norm_std,
                                    apply_masks_to_test=False)

    input_size = (cfg.data.height, cfg.data.width, 3)
    img = np.random.rand(*input_size).astype(np.float32)
    img = np.uint8(img * 255)
    im = Image.fromarray(img)
    blob = transform(im).unsqueeze(0)

    torch.onnx.export(
        model,
        blob,
        args.output_name + '.onnx',
        verbose=False,
        export_params=True,
        input_names=['data'],
        output_names=['reid_embedding'],
        opset_version=9)  # 9th version resolves nearest upsample issue
Example #10
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', help='path to config file')
    parser.add_argument('-s', '--sources', type=str, nargs='+', help='source datasets (delimited by space)')
    parser.add_argument('-t', '--targets', type=str, nargs='+', help='target datasets (delimited by space)')
    parser.add_argument('--root', type=str, default='', help='path to data root')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = build_datamanager(cfg)

    print('Building model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(**model_kwargs(cfg, datamanager.num_train_pids))
    num_params, flops = compute_model_complexity(model, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if cfg.model.load_weights and check_isfile(cfg.model.load_weights):
        if cfg.model.pretrained and not cfg.test.evaluate:
            state_dict = torch.load(cfg.model.load_weights)
            model.load_pretrained_weights(state_dict)
        else:
            load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer, **lr_scheduler_kwargs(cfg))

    if cfg.model.resume and check_isfile(cfg.model.resume):
        cfg.train.start_epoch = resume_from_checkpoint(
            cfg.model.resume, model, optimizer=optimizer, scheduler=scheduler
        )

    print('Building {}-engine for {}-reid'.format(cfg.loss.name, cfg.data.type))
    engine = build_engine(cfg, datamanager, model, optimizer, scheduler)
    engine.run(**engine_run_kwargs(cfg))
Example #11
    def _create_model(self, config, from_scratch: bool = False):
        """
        Creates a model, based on the configuration in config
        :param config: deep-object-reid configuration from which the model has to be built
        :param from_scratch: bool, if True does not load any weights
        :return model: Model in training mode
        """
        num_train_classes = len(self._labels)
        model = torchreid.models.build_model(
            **model_kwargs(config, num_train_classes))
        if self._cfg.model.load_weights and not from_scratch:
            load_pretrained_weights(model, self._cfg.model.load_weights)
        return model
Example #12
def main():
    global args

    set_random_seed(args.seed)
    if not args.use_avai_gpus:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
    use_gpu = (torch.cuda.is_available() and not args.use_cpu)
    log_name = 'test.log' if args.evaluate else 'train.log'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))
    print('==========\nArgs:{}\n=========='.format(args))
    if use_gpu:
        print('Currently using GPU {}'.format(args.gpu_devices))
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    datamanager = build_datamanager(args)

    print('Building model: {}'.format(args.arch))
    model = torchreid.models.build_model(
        name=args.arch,
        num_classes=datamanager.num_train_pids,
        loss=args.loss.lower(),
        pretrained=(not args.no_pretrained),
        use_gpu=use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if use_gpu:
        model = nn.DataParallel(model).cuda()

    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(args))

    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(args))

    if args.resume and check_isfile(args.resume):
        args.start_epoch = resume_from_checkpoint(args.resume,
                                                  model,
                                                  optimizer=optimizer)

    print('Building {}-engine for {}-reid'.format(args.loss, args.app))
    engine = build_engine(args, datamanager, model, optimizer, scheduler)

    engine.run(**engine_run_kwargs(args))
Example #13
    def __init__(
        self,
        model_name='',
        model_path='',
        image_size=(256, 128),
        pixel_mean=[0.485, 0.456, 0.406],
        pixel_std=[0.229, 0.224, 0.225],
        pixel_norm=True,
        device='cuda',
        verbose=True
    ):
        # Build model
        model = build_model(
            model_name,
            num_classes=1,
            pretrained=True,
            use_gpu=device.startswith('cuda')
        )
        model.eval()

        num_params, flops = compute_model_complexity(
            model, (1, 3, image_size[0], image_size[1])
        )

        if verbose:
            print('Model: {}'.format(model_name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))

        if model_path and check_isfile(model_path):
            load_pretrained_weights(model, model_path)

        # Build transform functions
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        if pixel_norm:
            transforms += [T.Normalize(mean=pixel_mean, std=pixel_std)]
        preprocess = T.Compose(transforms)

        to_pil = T.ToPILImage()

        device = torch.device(device)
        model.to(device)

        # Class attributes
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device
Example #14
    def __init__(self,
                 config_path='',
                 model_path='',
                 device='cuda',
                 verbose=True):
        # Build model
        cfg = get_default_config()
        merge_from_files_with_base(cfg, config_path)
        cfg.use_gpu = device.startswith('cuda')
        model = build_model(**model_kwargs(cfg, 1))
        model.eval()

        image_size = (cfg.data.height, cfg.data.width)
        flops, num_params = get_model_complexity_info(
            model, (3, image_size[0], image_size[1]),
            as_strings=False,
            verbose=False,
            print_per_layer_stat=False)

        if verbose:
            print('Model: {}'.format(cfg.model.name))
            print('- params: {:,}'.format(num_params))
            print('- flops: {:,}'.format(flops))

        if model_path and check_isfile(model_path):
            load_pretrained_weights(model, model_path)

        # Build transform functions
        transforms = []
        transforms += [T.Resize(image_size)]
        transforms += [T.ToTensor()]
        print(cfg.data.norm_mean, cfg.data.norm_std)
        transforms += [
            T.Normalize(mean=cfg.data.norm_mean, std=cfg.data.norm_std)
        ]
        preprocess = T.Compose(transforms)

        to_pil = T.ToPILImage()

        device = torch.device(device)
        model.to(device)

        # Class attributes
        self.model = model
        self.preprocess = preprocess
        self.to_pil = to_pil
        self.device = device
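The constructor above only builds and stores the model, preprocessing pipeline and device. A minimal sketch of the forward call such an extractor might expose follows; the `__call__` name and accepted input types are assumptions modelled on the torchreid FeatureExtractor pattern, not code from this class.

    def __call__(self, inputs):
        # Sketch only: accepts a list of PIL images or HxWx3 uint8 numpy arrays
        # (numpy assumed to be imported as np) and returns a batch of embeddings.
        images = []
        for element in inputs:
            if isinstance(element, np.ndarray):
                element = self.to_pil(element)
            images.append(self.preprocess(element))
        batch = torch.stack(images, dim=0).to(self.device)
        with torch.no_grad():
            features = self.model(batch)
        return features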
Example #15
def main():

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument('--output_name', type=str, default='model')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)

    model = torchreid.models.build_model(name=cfg.model.name,
                                         num_classes=2,
                                         loss=cfg.loss.name,
                                         pretrained=cfg.model.pretrained,
                                         use_gpu=cfg.use_gpu,
                                         dropout_prob=cfg.model.dropout_prob,
                                         feature_dim=cfg.model.feature_dim,
                                         activation=cfg.model.activation,
                                         in_first=cfg.model.in_first)
    load_pretrained_weights(model, cfg.model.load_weights)
    model.eval()

    _, transform = build_transforms(cfg.data.height, cfg.data.width)

    input_size = (cfg.data.height, cfg.data.width, 3)
    img = np.random.rand(*input_size).astype(np.float32)
    img = np.uint8(img * 255)
    im = Image.fromarray(img)
    blob = transform(im).unsqueeze(0)

    torch.onnx.export(model,
                      blob,
                      args.output_name + '.onnx',
                      verbose=True,
                      export_params=True)
Example #16
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--root',
        type=str,
        default=
        '/media/ddj2/ce611f70-968b-4316-9547-9bc9cf931d32/V20200108/zhejiang_train'
    )
    parser.add_argument('-d', '--dataset', type=str, default='rock_dataset')
    parser.add_argument('-m', '--model', type=str, default='abd_resnet')
    parser.add_argument('--weights', type=str)
    parser.add_argument('--save-dir', type=str, default='log/resnet50_cam')
    parser.add_argument('--height', type=int, default=672)
    parser.add_argument('--width', type=int, default=672)
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = "3"

    use_gpu = torch.cuda.is_available()
    torchreid.data.register_image_dataset(
        'rock_dataset', torchreid.data.datasets.image.rock_dataset.RockDataSet)

    datamanager = torchreid.data.ImageDataManager(
        root=args.root,
        sources=args.dataset,
        height=args.height,
        width=args.width,
        batch_size_train=4,
        batch_size_test=4,
        transforms=None,
        train_sampler='SequentialSampler')
    test_loader = datamanager.test_loader

    model = torchreid.models.build_model(
        name=args.model,
        num_classes=datamanager.num_train_pids,
        use_gpu=use_gpu)

    if use_gpu:
        model = model.cuda()

    if args.weights and check_isfile(args.weights):
        load_pretrained_weights(model, args.weights)

    visactmap(model, test_loader, args.save_dir, args.width, args.height,
              use_gpu)
Example #17
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str, default='')
    parser.add_argument('-m', '--model', type=str, default='resnet50')
    parser.add_argument(
        '--weights',
        type=str,
        default=
        '/media/ddj2/ce611f70-968b-4316-9547-9bc9cf931d32/remote_data/PycharmProjects/ABD-Net-master/model_best.pth.tar'
    )
    parser.add_argument('--save-dir', type=str, default='logs/resnet50')
    parser.add_argument('--height', type=int, default=672)
    parser.add_argument('--width', type=int, default=672)
    args = parser.parse_args()

    use_gpu = torch.cuda.is_available()

    test_dir = '/media/ddj2/ce611f70-968b-4316-9547-9bc9cf931d32/测试集/ceshi/crop/浙江省温州苍南县西古庵早白垩世小平田组PM201(挑选3张泛化测试用)20200114'
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    test_dataset = datasets.ImageFolder(
        test_dir, transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=8,
                                              shuffle=False,
                                              num_workers=8,
                                              pin_memory=True)

    model = torchreid.models.build_model(name=args.model,
                                         num_classes=70,
                                         use_gpu=use_gpu)

    if use_gpu:
        model = model.cuda()

    if args.weights and check_isfile(args.weights):
        load_pretrained_weights(model, args.weights)

    visactmap(model, test_loader, args.save_dir, args.width, args.height,
              use_gpu)
Example #18
def test_single(num_attrs=26):
    global args, best_accu
    args = parser.parse_args()
    attr_num = attr_nums['pa100k']
    # create model
    model = models.build_model(args.arch,
                               num_attrs,
                               pretrained=not args.no_pretrained,
                               use_gpu=True)
    load_pretrained_weights(model, args.load_weights)
    model = model.cuda()
    model.eval()

    img_path = "/home/bavon/face_test/reid/c1.jpg"
    output = get_attr(img_path, model)
    print("output is:{}".format(output))
    for it in range(attr_num):
        if output[it] == 1:
            print('{} '.format(description['pa100k'][it]))
Example #19
def main():
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', '-c', type=str, required=True)
    parser.add_argument('opts', default=None, nargs=REMAINDER)
    args = parser.parse_args()

    assert osp.exists(args.config)

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    merge_from_files_with_base(cfg, args.config)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    model = torchreid.models.build_model(**model_kwargs(cfg, [0, 0]))
    load_pretrained_weights(model, cfg.model.load_weights)

    conv_layers = collect_conv_layers(model)
    show_stat(conv_layers)
Example #20
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--root', type=str)
    parser.add_argument('-d', '--dataset', type=str, default='market1501')
    parser.add_argument('-m', '--model', type=str, default='osnet_x1_0')
    parser.add_argument('--weights', type=str)
    parser.add_argument('--save-dir', type=str, default='log')
    parser.add_argument('--height', type=int, default=256)
    parser.add_argument('--width', type=int, default=128)
    args = parser.parse_args()

    use_gpu = torch.cuda.is_available()

    datamanager = torchreid.data.ImageDataManager(
        root=args.root,
        sources=args.dataset,
        height=args.height,
        width=args.width,
        batch_size_train=100,
        batch_size_test=100,
        transforms=None,
        train_sampler='SequentialSampler'
    )
    test_loader = datamanager.test_loader

    model = torchreid.models.build_model(
        name=args.model,
        num_classes=datamanager.num_train_pids,
        use_gpu=use_gpu
    )

    if use_gpu:
        model = model.cuda()

    if args.weights and check_isfile(args.weights):
        load_pretrained_weights(model, args.weights)

    visactmap(
        model, test_loader, args.save_dir, args.width, args.height, use_gpu
    )
Example #21
def test_dis(num_attrs=26):
    global args, best_accu
    args = parser.parse_args()
    attr_num = attr_nums['pa100k']
    # create model
    model = models.build_model(args.arch,
                               num_attrs,
                               pretrained=not args.no_pretrained,
                               use_gpu=True)
    load_pretrained_weights(model, args.load_weights)
    model = model.cuda()
    model.eval()

    img1_path = '/home/bavon/face_test/reid/yuexin.jpg'
    img1_path = '/home/bavon/face_test/reid/d1.jpg'
    img2_path = '/home/bavon/model/datasets/rap/RAP_dataset/CAM17_2014-02-20_20140220175154-20140220175854_tarid124_frame2893_line1.png'
    img2_path = '/home/bavon/face_test/reid/yangyi.jpg'
    img2_path = '/home/bavon/face_test/reid/yuexin3.jpg'
    img2_path = '/home/bavon/face_test/reid/d2.jpg'
    output1 = get_attr(img1_path, model)
    output2 = get_attr(img2_path, model)
    dist = np.linalg.norm(output1 - output2)
    print("distance is:{}".format(dist))
Example #22
def download_model(net,
                   model_name,
                   local_model_store_dir_path=os.path.join(
                       "~", ".torch", "models")):
    """
    Load model state dictionary from a file, downloading the file first if necessary.
    Parameters
    ----------
    net : Module
        Network in which weights are loaded.
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $TORCH_HOME/models
        Location for keeping the model parameters.
    """
    net = load_pretrained_weights(
        model=net,
        file_path=get_model_file(
            model_name=model_name,
            local_model_store_dir_path=local_model_store_dir_path))

    return net
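For illustration, one way the helper above might be called; the model name and builder arguments are assumptions, not part of the original snippet.

net = torchreid.models.build_model(name='osnet_x1_0', num_classes=1000)
net = download_model(net, model_name='osnet_x1_0')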
Example #23
def main():
    global args

    set_random_seed(args.seed)
    use_gpu = torch.cuda.is_available() and not args.use_cpu
    log_name = 'test.log' if args.evaluate else 'train.log'
    sys.stdout = Logger(osp.join(args.save_dir, log_name))

    print('** Arguments **')
    arg_keys = list(args.__dict__.keys())
    arg_keys.sort()
    for key in arg_keys:
        print('{}: {}'.format(key, args.__dict__[key]))
    print('\n')
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if use_gpu:
        torch.backends.cudnn.benchmark = True
    else:
        warnings.warn(
            'Currently using CPU, however, GPU is highly recommended')

    dataset_vars = init_dataset(use_gpu)
    trainloader, valloader, testloader, num_attrs, attr_dict = dataset_vars

    if args.weighted_bce:
        print('Use weighted binary cross entropy')
        print('Computing the weights ...')
        bce_weights = torch.zeros(num_attrs, dtype=torch.float)
        for _, attrs, _ in trainloader:
            bce_weights += attrs.sum(0)  # sum along the batch dim
        bce_weights /= len(trainloader) * args.batch_size
        print('Sample ratio for each attribute: {}'.format(bce_weights))
        bce_weights = torch.exp(-1 * bce_weights)
        print('BCE weights: {}'.format(bce_weights))
        bce_weights = bce_weights.expand(args.batch_size, num_attrs)
        criterion = nn.BCEWithLogitsLoss(weight=bce_weights)

    else:
        print('Use plain binary cross entropy')
        criterion = nn.BCEWithLogitsLoss()

    print('Building model: {}'.format(args.arch))
    model = models.build_model(args.arch,
                               num_attrs,
                               pretrained=not args.no_pretrained,
                               use_gpu=use_gpu)
    num_params, flops = compute_model_complexity(
        model, (1, 3, args.height, args.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if args.load_weights and check_isfile(args.load_weights):
        load_pretrained_weights(model, args.load_weights)

    if use_gpu:
        model = nn.DataParallel(model).cuda()
        criterion = criterion.cuda()

    if args.evaluate:
        test(model, testloader, attr_dict, use_gpu)
        return

    optimizer = torchreid.optim.build_optimizer(model,
                                                **optimizer_kwargs(args))
    scheduler = torchreid.optim.build_lr_scheduler(optimizer,
                                                   **lr_scheduler_kwargs(args))

    start_epoch = args.start_epoch
    best_result = -np.inf
    if args.resume and check_isfile(args.resume):
        checkpoint = torch.load(args.resume)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        start_epoch = checkpoint['epoch']
        best_result = checkpoint['label_mA']
        print('Loaded checkpoint from "{}"'.format(args.resume))
        print('- start epoch: {}'.format(start_epoch))
        print('- label_mA: {}'.format(best_result))

    time_start = time.time()

    for epoch in range(start_epoch, args.max_epoch):
        train(epoch, model, criterion, optimizer, scheduler, trainloader,
              use_gpu)
        test_outputs = test(model, testloader, attr_dict, use_gpu)
        label_mA = test_outputs[0]
        is_best = label_mA > best_result
        if is_best:
            best_result = label_mA

        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'epoch': epoch + 1,
                'label_mA': label_mA,
                'optimizer': optimizer.state_dict(),
            },
            args.save_dir,
            is_best=is_best)

    elapsed = round(time.time() - time_start)
    elapsed = str(datetime.timedelta(seconds=elapsed))
    print('Elapsed {}'.format(elapsed))
Example #24
def main():
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file', type=str, default='', required=True,
                        help='Path to config file')
    parser.add_argument('--output-name', type=str, default='model',
                        help='Path to save ONNX model')
    parser.add_argument('--num-classes', type=int, nargs='+', default=None)
    parser.add_argument('--opset', type=int, default=11)
    parser.add_argument('--verbose', action='store_true',
                        help='Verbose mode for onnx.export')
    parser.add_argument('--disable-dyn-axes', default=False, action='store_true')
    parser.add_argument('--export_ir', action='store_true')
    parser.add_argument('opts', default=None, nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg)
    cfg.merge_from_list(args.opts)

    compression_hyperparams = get_compression_hyperparams(cfg.model.load_weights)
    is_nncf_used = compression_hyperparams['enable_quantization'] or compression_hyperparams['enable_pruning']
    if is_nncf_used:
        print(f'Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg,
                                          compression_hyperparams['enable_quantization'],
                                          compression_hyperparams['enable_pruning'],
                                          args.opts)
    cfg.train.mix_precision = False
    cfg.freeze()
    num_classes = parse_num_classes(source_datasets=cfg.data.sources,
                                    classification=cfg.model.type == 'classification' or cfg.model.type == 'multilabel',
                                    num_classes=args.num_classes,
                                    snap_path=cfg.model.load_weights)
    model = build_model(**model_kwargs(cfg, num_classes))
    if cfg.model.load_weights:
        load_pretrained_weights(model, cfg.model.load_weights)
    else:
        warnings.warn("No weights are passed through 'load_weights' parameter! "
              "The model will be converted with random or pretrained weights", category=RuntimeWarning)
    if 'tresnet' in cfg.model.name:
        patch_InplaceAbn_forward()
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        model = make_nncf_changes_in_eval(model, cfg)
        print('End making NNCF changes in model')
    onnx_file_path = export_onnx(model=model.eval(),
                                 cfg=cfg,
                                 output_file_path=args.output_name,
                                 disable_dyn_axes=args.disable_dyn_axes,
                                 verbose=args.verbose,
                                 opset=args.opset,
                                 extra_check=True)
    if args.export_ir:
        input_shape = [1, 3, cfg.data.height, cfg.data.width]
        export_ir(onnx_model_path=onnx_file_path,
                  norm_mean=cfg.data.norm_mean,
                  norm_std=cfg.data.norm_std,
                  input_shape=input_shape,
                  optimized_model_dir=os.path.dirname(os.path.abspath(onnx_file_path)),
                  data_type='FP32')
Example #25
d = CrowdHuman(
    root='/mnt/lustre/share/fengweitao',
    meta_file=
    '/mnt/lustre/share/fengweitao/crowd_human/annotation_train.odgt')
dl = torch.utils.data.DataLoader(d, batch_size=16, num_workers=4)
print(dl)
config_file = 'configs/im_osnet_x1_0_softmax_256x128_amsgrad_cosine.yaml'
cfg = get_default_config()
cfg.use_gpu = torch.cuda.is_available()
cfg.merge_from_file(config_file)
model = torchreid.models.build_model(name=cfg.model.name,
                                     num_classes=1024,
                                     loss=cfg.loss.name,
                                     pretrained=False,
                                     use_gpu=cfg.use_gpu)
load_pretrained_weights(model, cfg.model.load_weights)
# m = None
model.eval()
model.cuda()
all_results = []
for i, data in enumerate(dl):
    data = to_cuda(data, device='cuda')[0]
    print(data.keys())
    n, m, c, h, w = data['im'].shape
    indata = data['im'].reshape(n * m, c, h, w)
    with torch.no_grad():
        o = model(indata)
    o = o.reshape(n, m, -1)
    dm = 0.
    for j in range(o.size(1)):
        for k in range(o.size(1)):
Example #26
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--model-name',
                        type=str,
                        default='',
                        help='Model name')
    parser.add_argument('--weights', type=str, default='', help='Weights path')
    parser.add_argument('--output',
                        type=str,
                        default='output',
                        help='Output path')
    parser.add_argument('--resolution',
                        type=str,
                        default='128x256',
                        help='Resolution (WxH)')

    args = parser.parse_args()
    width, height = [int(i) for i in args.resolution.split('x')]

    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    imagedata_kwargs = {
        'root': 'reid-data',
        'sources': ['market1501'],
        'targets': ['market1501'],
        'height': 256,
        'width': 128,
        'transforms': ['random_flip', 'color_jitter'],
        'norm_mean': [0.485, 0.456, 0.406],
        'norm_std': [0.229, 0.224, 0.225],
        'use_gpu': False,
        'split_id': 0,
        'combineall': False,
        'load_train_targets': False,
        'batch_size_train': 64,
        'batch_size_test': 300,
        'workers': 4,
        'num_instances': 4,
        'train_sampler': 'RandomSampler',
        'cuhk03_labeled': False,
        'cuhk03_classic_split': False,
        'market1501_500k': False
    }
    datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs)

    print('Building model: {}'.format(args.model_name))
    model = torchreid.models.build_model(
        name=args.model_name,
        num_classes=datamanager.num_train_pids,
        loss='softmax',
        pretrained=True,
        use_gpu=False)
    num_params, flops = compute_model_complexity(model, (1, 3, height, width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    if args.weights and check_isfile(args.weights):
        load_pretrained_weights(model, args.weights)
    _input = torch.Tensor(1, 3, height, width)
    inputs = (_input, )

    print('Converting PyTorch model to ONNX...')
    tmp = tempfile.mktemp(suffix='.onnx')
    torch.onnx._export(model, inputs, tmp, export_params=True)

    onnx_model = onnx.load(tmp)
    export_path = args.output

    onnx.checker.check_model(onnx_model)

    print('Prepare TF model...')
    tf_rep = prepare(onnx_model, strict=False)

    if path.exists(export_path):
        shutil.rmtree(export_path)

    with tf.Session() as persisted_sess:
        print("load graph")
        persisted_sess.graph.as_default()
        tf.import_graph_def(tf_rep.graph.as_graph_def(), name='')

        i_tensors = []
        o_tensors = []
        inputs = {}
        outputs = {}

        for i in tf_rep.inputs:
            t = persisted_sess.graph.get_tensor_by_name(
                tf_rep.tensor_dict[i].name)
            i_tensors.append(t)
            tensor_info = tf.saved_model.utils.build_tensor_info(t)
            inputs[t.name.split(':')[0].lower()] = tensor_info
            print('input tensor [name=%s, type=%s, shape=%s]' %
                  (t.name, t.dtype.name, t.shape.as_list()))
        print('')

        for i in tf_rep.outputs:
            t = persisted_sess.graph.get_tensor_by_name(
                tf_rep.tensor_dict[i].name)
            o_tensors.append(t)
            tensor_info = tf.saved_model.utils.build_tensor_info(t)
            outputs[t.name.split(':')[0]] = tensor_info
            print('output tensor [name=%s, type=%s, shape=%s]' %
                  (t.name, t.dtype.name, t.shape.as_list()))

        feed_dict = {}
        for i in i_tensors:
            feed_dict[i] = np.random.rand(*i.shape.as_list()).astype(
                i.dtype.name)

        print('test run:')
        res = persisted_sess.run(o_tensors, feed_dict=feed_dict)
        print(res)

        # print('INPUTS')
        # print(inputs)
        # print('OUTPUTS')
        # print(outputs)
        prediction_signature = (
            tf.saved_model.signature_def_utils.build_signature_def(
                inputs=inputs,
                outputs=outputs,
                method_name=tf.saved_model.signature_constants.
                PREDICT_METHOD_NAME))
        builder = tf.saved_model.builder.SavedModelBuilder(export_path)
        builder.add_meta_graph_and_variables(
            persisted_sess, [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                prediction_signature
            })
        builder.save()
        print('Model saved to %s' % export_path)
Example #27
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config-file',
                        type=str,
                        default='',
                        help='path to config file')
    parser.add_argument('-s',
                        '--sources',
                        type=str,
                        nargs='+',
                        help='source datasets (delimited by space)')
    parser.add_argument('-t',
                        '--targets',
                        type=str,
                        nargs='+',
                        help='target datasets (delimited by space)')
    parser.add_argument('--transforms',
                        type=str,
                        nargs='+',
                        help='data augmentation')
    parser.add_argument('--root',
                        type=str,
                        default='',
                        help='path to data root')
    parser.add_argument('opts',
                        default=None,
                        nargs=argparse.REMAINDER,
                        help='Modify config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)
    set_random_seed(cfg.train.seed)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))
    print('Collecting env info ...')
    print('** System info **\n{}\n'.format(collect_env_info()))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    datamanager = torchreid.data.ImageDataManager(**imagedata_kwargs(cfg))

    print('Building model-1: {}'.format(cfg.model.name))
    model1 = torchreid.models.build_model(
        name=cfg.model.name,
        num_classes=datamanager.num_train_pids,
        loss=cfg.loss.name,
        pretrained=cfg.model.pretrained,
        use_gpu=cfg.use_gpu)
    num_params, flops = compute_model_complexity(
        model1, (1, 3, cfg.data.height, cfg.data.width))
    print('Model complexity: params={:,} flops={:,}'.format(num_params, flops))

    print('Copying model-1 to model-2')
    model2 = copy.deepcopy(model1)

    if cfg.model.load_weights1 and check_isfile(cfg.model.load_weights1):
        load_pretrained_weights(model1, cfg.model.load_weights1)

    if cfg.model.load_weights2 and check_isfile(cfg.model.load_weights2):
        load_pretrained_weights(model2, cfg.model.load_weights2)

    if cfg.use_gpu:
        model1 = nn.DataParallel(model1).cuda()
        model2 = nn.DataParallel(model2).cuda()

    optimizer1 = torchreid.optim.build_optimizer(model1,
                                                 **optimizer_kwargs(cfg))
    scheduler1 = torchreid.optim.build_lr_scheduler(optimizer1,
                                                    **lr_scheduler_kwargs(cfg))

    optimizer2 = torchreid.optim.build_optimizer(model2,
                                                 **optimizer_kwargs(cfg))
    scheduler2 = torchreid.optim.build_lr_scheduler(optimizer2,
                                                    **lr_scheduler_kwargs(cfg))

    if cfg.model.resume1 and check_isfile(cfg.model.resume1):
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume1,
                                                       model1,
                                                       optimizer=optimizer1,
                                                       scheduler=scheduler1)

    if cfg.model.resume2 and check_isfile(cfg.model.resume2):
        resume_from_checkpoint(cfg.model.resume2,
                               model2,
                               optimizer=optimizer2,
                               scheduler=scheduler2)

    print('Building DML-engine for image-reid')
    engine = ImageDMLEngine(datamanager,
                            model1,
                            optimizer1,
                            scheduler1,
                            model2,
                            optimizer2,
                            scheduler2,
                            margin=cfg.loss.triplet.margin,
                            weight_t=cfg.loss.triplet.weight_t,
                            weight_x=cfg.loss.triplet.weight_x,
                            weight_ml=cfg.loss.dml.weight_ml,
                            use_gpu=cfg.use_gpu,
                            label_smooth=cfg.loss.softmax.label_smooth,
                            deploy=cfg.model.deploy)
    engine.run(**engine_run_kwargs(cfg))
Example #28
def main():
    parser = build_base_argparser()
    parser.add_argument('-e',
                        '--auxiliary-models-cfg',
                        type=str,
                        nargs='*',
                        default='',
                        help='path to extra config files')
    parser.add_argument('--split-models',
                        action='store_true',
                        help='whether to split models on own gpu')
    parser.add_argument('--enable_quantization',
                        action='store_true',
                        help='Enable NNCF quantization algorithm')
    parser.add_argument('--enable_pruning',
                        action='store_true',
                        help='Enable NNCF pruning algorithm')
    parser.add_argument(
        '--aux-config-opts',
        nargs='+',
        default=None,
        help='Modify aux config options using the command-line')
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_nncf_used = args.enable_quantization or args.enable_pruning
    if is_nncf_used:
        print(f'Using NNCF -- making NNCF changes in config')
        cfg = make_nncf_changes_in_config(cfg, args.enable_quantization,
                                          args.enable_pruning, args.opts)

    set_random_seed(cfg.train.seed, cfg.train.deterministic)

    log_name = 'test.log' if cfg.test.evaluate else 'train.log'
    log_name += time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))

    print('Show configuration\n{}\n'.format(cfg))

    if cfg.use_gpu:
        torch.backends.cudnn.benchmark = True

    num_aux_models = len(cfg.mutual_learning.aux_configs)
    datamanager = build_datamanager(cfg, args.classes)
    num_train_classes = datamanager.num_train_pids

    print('Building main model: {}'.format(cfg.model.name))
    model = torchreid.models.build_model(
        **model_kwargs(cfg, num_train_classes))
    macs, num_params = get_model_complexity_info(
        model, (3, cfg.data.height, cfg.data.width),
        as_strings=False,
        verbose=False,
        print_per_layer_stat=False)
    print('Main model complexity: params={:,} flops={:,}'.format(
        num_params, macs * 2))

    aux_lr = cfg.train.lr  # placeholder, needed for aux models, may be filled by nncf part below
    if is_nncf_used:
        print('Begin making NNCF changes in model')
        if cfg.use_gpu:
            model.cuda()

        compression_ctrl, model, cfg, aux_lr, nncf_metainfo = \
            make_nncf_changes_in_training(model, cfg,
                                          args.classes,
                                          args.opts)

        should_freeze_aux_models = True
        print(f'should_freeze_aux_models = {should_freeze_aux_models}')
        print('End making NNCF changes in model')
    else:
        compression_ctrl = None
        should_freeze_aux_models = False
        nncf_metainfo = None
    # creating optimizer and scheduler -- it should be done after NNCF part, since
    # NNCF could change some parameters
    optimizer = torchreid.optim.build_optimizer(model, **optimizer_kwargs(cfg))

    if cfg.lr_finder.enable and not cfg.model.resume:
        scheduler = None
    else:
        scheduler = torchreid.optim.build_lr_scheduler(
            optimizer=optimizer,
            num_iter=datamanager.num_iter,
            **lr_scheduler_kwargs(cfg))
    # Loading model (and optimizer and scheduler in case of resuming training).
    # Note that if NNCF is used, loading is done inside NNCF part, so loading here is not required.
    if cfg.model.resume and check_isfile(
            cfg.model.resume) and not is_nncf_used:
        device_ = 'cuda' if cfg.use_gpu else 'cpu'
        cfg.train.start_epoch = resume_from_checkpoint(cfg.model.resume,
                                                       model,
                                                       optimizer=optimizer,
                                                       scheduler=scheduler,
                                                       device=device_)
    elif cfg.model.load_weights and not is_nncf_used:
        load_pretrained_weights(model, cfg.model.load_weights)

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=cfg.test.evaluate)

    model, extra_device_ids = put_main_model_on_the_device(
        model, cfg.use_gpu, args.gpu_num, num_aux_models, args.split_models)

    if cfg.lr_finder.enable and not cfg.test.evaluate and not cfg.model.resume:
        aux_lr, model, optimizer, scheduler = run_lr_finder(
            cfg,
            datamanager,
            model,
            optimizer,
            scheduler,
            args.classes,
            rebuild_model=True,
            gpu_num=args.gpu_num,
            split_models=args.split_models)

    log_dir = cfg.data.tb_log_dir if cfg.data.tb_log_dir else cfg.data.save_dir
    run_training(cfg,
                 datamanager,
                 model,
                 optimizer,
                 scheduler,
                 extra_device_ids,
                 aux_lr,
                 tb_writer=SummaryWriter(log_dir=log_dir),
                 should_freeze_aux_models=should_freeze_aux_models,
                 nncf_metainfo=nncf_metainfo,
                 compression_ctrl=compression_ctrl)
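
A minimal, self-contained sketch of the model-complexity measurement used near the top of this example, assuming get_model_complexity_info comes from the ptflops package (the toy model and input size below are illustrative only, not the real re-id network):

import torch.nn as nn
from ptflops import get_model_complexity_info  # assumption: same helper as in the example above

toy_model = nn.Sequential(
    nn.Conv2d(3, 8, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.AdaptiveAvgPool2d(1),
    nn.Flatten(),
    nn.Linear(8, 10))
macs, num_params = get_model_complexity_info(
    toy_model, (3, 256, 128),  # (channels, height, width), mirroring cfg.data.height/width
    as_strings=False,
    verbose=False,
    print_per_layer_stat=False)
# ptflops reports multiply-accumulate operations (MACs); the example above converts
# them to FLOPs with the usual factor of two (one multiply plus one add per MAC).
print('params={:,} flops={:,}'.format(num_params, macs * 2))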
Exemple #29
0
    def train(self,
              dataset: DatasetEntity,
              output_model: ModelEntity,
              train_parameters: Optional[TrainParameters] = None):
        """ Trains a model on a dataset """

        train_model = deepcopy(self._model)

        if train_parameters is not None:
            update_progress_callback = train_parameters.update_progress
        else:
            update_progress_callback = default_progress_callback
        time_monitor = TrainingProgressCallback(
            update_progress_callback,
            num_epoch=self._cfg.train.max_epoch,
            num_train_steps=math.ceil(
                len(dataset.get_subset(Subset.TRAINING)) /
                self._cfg.train.batch_size),
            num_val_steps=0,
            num_test_steps=0)

        self.metrics_monitor = DefaultMetricsMonitor()
        self.stop_callback.reset()

        set_random_seed(self._cfg.train.seed)
        train_subset = dataset.get_subset(Subset.TRAINING)
        val_subset = dataset.get_subset(Subset.VALIDATION)
        self._cfg.custom_datasets.roots = [
            OTEClassificationDataset(train_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels),
            OTEClassificationDataset(val_subset,
                                     self._labels,
                                     self._multilabel,
                                     keep_empty_label=self._empty_label
                                     in self._labels)
        ]
        datamanager = torchreid.data.ImageDataManager(
            **imagedata_kwargs(self._cfg))

        num_aux_models = len(self._cfg.mutual_learning.aux_configs)

        if self._cfg.use_gpu:
            main_device_ids = list(range(self.num_devices))
            extra_device_ids = [main_device_ids for _ in range(num_aux_models)]
            train_model = DataParallel(train_model,
                                       device_ids=main_device_ids,
                                       output_device=0).cuda(
                                           main_device_ids[0])
        else:
            extra_device_ids = [None for _ in range(num_aux_models)]

        optimizer = torchreid.optim.build_optimizer(
            train_model, **optimizer_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            scheduler = None
        else:
            scheduler = torchreid.optim.build_lr_scheduler(
                optimizer,
                num_iter=datamanager.num_iter,
                **lr_scheduler_kwargs(self._cfg))

        if self._cfg.lr_finder.enable:
            _, train_model, optimizer, scheduler = run_lr_finder(
                self._cfg,
                datamanager,
                train_model,
                optimizer,
                scheduler,
                None,
                rebuild_model=False,
                gpu_num=self.num_devices,
                split_models=False)

        _, final_acc = run_training(self._cfg,
                                    datamanager,
                                    train_model,
                                    optimizer,
                                    scheduler,
                                    extra_device_ids,
                                    self._cfg.train.lr,
                                    tb_writer=self.metrics_monitor,
                                    perf_monitor=time_monitor,
                                    stop_callback=self.stop_callback)

        training_metrics = self._generate_training_metrics_group()

        self.metrics_monitor.close()
        if self.stop_callback.check_stop():
            logger.info('Training cancelled.')
            return

        logger.info("Training finished.")

        best_snap_path = os.path.join(self._scratch_space, 'best.pth')
        if os.path.isfile(best_snap_path):
            load_pretrained_weights(self._model, best_snap_path)

        for filename in os.listdir(self._scratch_space):
            match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
            if match:
                aux_model_name = match.group(1)
                best_aux_snap_path = os.path.join(self._scratch_space,
                                                  filename)
                self._aux_model_snap_paths[aux_model_name] = best_aux_snap_path

        self.save_model(output_model)
        performance = Performance(score=ScoreMetric(value=final_acc,
                                                    name="accuracy"),
                                  dashboard_metrics=training_metrics)
        logger.info(f'FINAL MODEL PERFORMANCE {performance}')
        output_model.performance = performance
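
A small standalone check of the aux-model snapshot matching performed above; the directory listing and paths below are hypothetical and only illustrate the regular expression:

import os
import re

# Hypothetical directory listing; the real code iterates over os.listdir(self._scratch_space).
filenames = ['best.pth', 'best_aux_model_1.pth', 'latest_aux_model_1.pth']
aux_model_snap_paths = {}
for filename in filenames:
    match = re.match(r'best_(aux_model_[0-9]+\.pth)', filename)
    if match:
        # group(1) keeps only the 'aux_model_<idx>.pth' part as the dictionary key
        aux_model_snap_paths[match.group(1)] = os.path.join('/tmp/scratch', filename)
print(aux_model_snap_paths)  # {'aux_model_1.pth': '/tmp/scratch/best_aux_model_1.pth'}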


def main():
    parser = build_base_argparser()
    args = parser.parse_args()

    cfg = get_default_config()
    cfg.use_gpu = torch.cuda.is_available() and args.gpu_num > 0
    if args.config_file:
        merge_from_files_with_base(cfg, args.config_file)
    reset_config(cfg, args)
    cfg.merge_from_list(args.opts)

    is_ie_model = cfg.model.load_weights.endswith('.xml')
    if not is_ie_model:
        compression_hyperparams = get_compression_hyperparams(
            cfg.model.load_weights)
        is_nncf_used = compression_hyperparams[
            'enable_quantization'] or compression_hyperparams['enable_pruning']

        if is_nncf_used:
            print('Using NNCF -- making NNCF changes in config')
            cfg = make_nncf_changes_in_config(
                cfg, compression_hyperparams['enable_quantization'],
                compression_hyperparams['enable_pruning'], args.opts)
    else:
        is_nncf_used = False

    set_random_seed(cfg.train.seed)

    log_name = 'test.log' + time.strftime('-%Y-%m-%d-%H-%M-%S')
    sys.stdout = Logger(osp.join(cfg.data.save_dir, log_name))
    datamanager = torchreid.data.ImageDataManager(filter_classes=args.classes,
                                                  **imagedata_kwargs(cfg))
    num_classes = len(
        datamanager.test_loader[cfg.data.targets[0]]['query'].dataset.classes)
    cfg.train.ema.enable = False
    if not is_ie_model:
        model = torchreid.models.build_model(**model_kwargs(cfg, num_classes))
        load_pretrained_weights(model, cfg.model.load_weights)
        if is_nncf_used:
            print('Begin making NNCF changes in model')
            model = make_nncf_changes_in_eval(model, cfg)
            print('End making NNCF changes in model')
        if cfg.use_gpu:
            num_devices = min(torch.cuda.device_count(), args.gpu_num)
            main_device_ids = list(range(num_devices))
            model = DataParallel(model,
                                 device_ids=main_device_ids,
                                 output_device=0).cuda(main_device_ids[0])
    else:
        from torchreid.utils.ie_tools import VectorCNN
        from openvino.inference_engine import IECore
        cfg.test.batch_size = 1
        model = VectorCNN(IECore(),
                          cfg.model.load_weights,
                          'CPU',
                          switch_rb=True,
                          **model_kwargs(cfg, num_classes))
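        # Drop the last two test-time transforms (presumably tensor conversion and
        # normalization): the OpenVINO model takes raw images and does its own preprocessing.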
        for _, dataloader in datamanager.test_loader.items():
            dataloader['query'].dataset.transform.transforms = \
                dataloader['query'].dataset.transform.transforms[:-2]

    if cfg.model.type == 'classification':
        check_classification_classes(model,
                                     datamanager,
                                     args.classes,
                                     test_only=True)

    engine = build_engine(cfg=cfg,
                          datamanager=datamanager,
                          model=model,
                          optimizer=None,
                          scheduler=None)
    engine.test(0,
                dist_metric=cfg.test.dist_metric,
                normalize_feature=cfg.test.normalize_feature,
                visrank=cfg.test.visrank,
                visrank_topk=cfg.test.visrank_topk,
                save_dir=cfg.data.save_dir,
                use_metric_cuhk03=cfg.cuhk03.use_metric_cuhk03,
                ranks=(1, 5, 10, 20),
                rerank=cfg.test.rerank)
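
Both the train() method and the main() script above wrap the model with DataParallel when GPUs are available. A minimal sketch of that device-selection pattern, assuming plain PyTorch; the helper name wrap_for_gpus is made up for illustration:

import torch
from torch.nn import DataParallel

def wrap_for_gpus(model, requested_gpu_num):
    """Cap the requested GPU count by what is actually available and keep
    device 0 as the output device, as in the examples above."""
    if not torch.cuda.is_available() or requested_gpu_num < 1:
        return model  # CPU fallback: use the model as-is
    num_devices = min(torch.cuda.device_count(), requested_gpu_num)
    main_device_ids = list(range(num_devices))
    return DataParallel(model,
                        device_ids=main_device_ids,
                        output_device=0).cuda(main_device_ids[0])

# Usage (illustrative): model = wrap_for_gpus(model, args.gpu_num)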