def resume_weights(self, weights):
    """Resume model (and optimizer) state from a checkpoint.

    Distillation setups keep the trainable network under
    ``student_model``; when that attribute exists, weights are restored
    into the student sub-model. The epoch recorded in the checkpoint is
    stored on ``self.start_epoch``.
    """
    if hasattr(self.model, 'student_model'):
        target = self.model.student_model
    else:
        target = self.model
    self.start_epoch = load_weight(target, weights, self.optimizer)
    logger.debug("Resume weights of epoch {}".format(self.start_epoch))
Exemple #2
0
def run(FLAGS, cfg):
    """Export a detection model to a static-graph inference model.

    Builds the architecture from ``cfg``, wraps it so the forward pass
    takes the deploy-time inputs (image, im_shape, scale_factor) with
    fixed InputSpecs, loads the trained weights, and saves the traced
    model plus an ``infer_cfg.yml`` deploy config under
    ``FLAGS.output_dir/<config_name>/``.
    """
    # Model
    model = create(cfg.architecture)
    inputs_def = cfg['TestReader']['inputs_def']
    assert 'image_shape' in inputs_def, 'image_shape must be specified.'
    image_shape = inputs_def.get('image_shape')

    # A fully-specified shape is required to build static InputSpecs.
    assert not None in image_shape, 'image_shape should not contain None'
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    # dump_infer_config may normalize the shape; use its return value.
    image_shape = dump_infer_config(cfg,
                                    os.path.join(save_dir, 'infer_cfg.yml'),
                                    image_shape)

    class ExportModel(nn.Layer):
        """Thin wrapper exposing get_export_model() under static specs."""

        def __init__(self, model):
            super(ExportModel, self).__init__()
            self.model = model

        @to_static(input_spec=[
            {
                'image': InputSpec(
                    shape=[None] + image_shape, name='image')
            },
            {
                'im_shape': InputSpec(
                    shape=[None, 2], name='im_shape')
            },
            {
                'scale_factor': InputSpec(
                    shape=[None, 2], name='scale_factor')
            },
        ])
        def forward(self, image, im_shape, scale_factor):
            # Merge the single-key spec dicts into one feed dict.
            inputs = {}
            inputs_tensor = [image, im_shape, scale_factor]
            for t in inputs_tensor:
                inputs.update(t)
            outs = self.model.get_export_model(inputs)
            return outs

    export_model = ExportModel(model)

    # Init Model
    load_weight(export_model.model, cfg.weights)

    export_model.eval()

    # export config and model
    paddle.jit.save(export_model, os.path.join(save_dir, 'model'))
    logger.info('Export model to {}'.format(save_dir))
def get_model(model_name, pretrained=True):
    """Build the architecture named by *model_name*, optionally loading
    its released pretrained weights."""
    cfg = load_config(get_config_file(model_name))
    model = create(cfg.architecture)
    if not pretrained:
        return model
    load_weight(model, get_weights_url(model_name))
    return model
Exemple #4
0
def run(FLAGS, cfg):
    """Export a trained model to static graph via dygraph_to_static.

    The exported files (and deploy config) are written to
    ``FLAGS.output_dir/<config_name>/``.
    """
    # Model
    model = create(cfg.architecture)
    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(FLAGS.output_dir, cfg_name)

    # Init Model
    load_weight(model, cfg.weights)

    # export config and model
    dygraph_to_static(model, save_dir, cfg)
    logger.info('Export model to {}'.format(save_dir))
Exemple #5
0
def run(FLAGS, cfg, place):
    """Evaluate a detection model on cfg.EvalDataset.

    Runs inference over the eval reader, logs throughput, then converts
    the collected outputs to COCO-format results and evaluates them.
    """
    # Model
    model = create(cfg.architecture)

    # Init Model
    load_weight(model, cfg.weights)

    # Data Reader
    dataset = cfg.EvalDataset
    eval_loader, _ = create('EvalReader')(dataset, cfg['worker_num'], place)

    # Run Eval
    outs_res = []
    start_time = time.time()
    sample_num = 0
    model.eval()  # inference mode; hoisted out of the loop
    for iter_id, data in enumerate(eval_loader):
        # forward
        outs = model(data, cfg['EvalReader']['inputs_def']['fields'], 'infer')
        outs_res.append(outs)

        # log
        sample_num += len(data)
        if iter_id % 100 == 0:
            logger.info("Eval iter: {}".format(iter_id))

    cost_time = time.time() - start_time
    logger.info('Total sample number: {}, averge FPS: {}'.format(
        sample_num, sample_num / cost_time))

    # Mask results are evaluated only when the config declares a MaskHead.
    eval_type = ['bbox']
    if getattr(cfg, 'MaskHead', None):
        eval_type.append('mask')
    # Metric
    # TODO: support other metric
    from ppdet.utils.coco_eval import get_category_info
    anno_file = dataset.get_anno()
    with_background = cfg.with_background
    use_default_label = dataset.use_default_label
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    infer_res = get_infer_results(outs_res, eval_type, clsid2catid)
    eval_results(infer_res, cfg.metric, anno_file)
Exemple #6
0
 def load_weights(self, weights, weight_type='pretrain'):
     """Load model weights.

     ``weight_type`` selects the mode:
       * 'resume'   -- restore model + optimizer state and continue
                       from the checkpointed epoch;
       * 'pretrain' / 'finetune' -- load weights only, start at epoch 0.
     """
     valid_types = ('pretrain', 'resume', 'finetune')
     assert weight_type in valid_types, \
             "weight_type can only be 'pretrain', 'resume', 'finetune'"
     if weight_type != 'resume':
         self.start_epoch = 0
         load_pretrain_weight(self.model, weights,
                              self.cfg.get('load_static_weights', False),
                              weight_type)
         logger.debug("Load {} weights {} to start training".format(
             weight_type, weights))
     else:
         self.start_epoch = load_weight(self.model, weights, self.optimizer)
         logger.debug("Resume weights of epoch {}".format(self.start_epoch))
     self._weights_loaded = True
Exemple #7
0
    def load_weights_sde(self, det_weights, reid_weights):
        """Load SDE tracker weights: detector (if configured) and ReID."""
        if self.model.detector is None:
            # Detector-free setup: only the ReID weights are loaded.
            load_weight(self.model.reid, reid_weights)
            return
        load_weight(self.model.detector, det_weights)
        if self.model.reid is not None:
            load_weight(self.model.reid, reid_weights)
Exemple #8
0
def run(FLAGS, cfg, place):
    """Train a detection model.

    Seeds per-trainer RNGs when launched distributed, builds the
    reader / model / optimizer, loads resume or pretrain weights, then
    runs the epoch/iter loop with periodic logging and per-epoch
    checkpointing (rank 0 only).
    """
    env = os.environ
    # Distributed launch is detected from the Paddle launcher env vars.
    FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
    if FLAGS.dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        # Per-trainer seed so workers do not draw identical samples.
        local_seed = (99 + trainer_id)
        random.seed(local_seed)
        np.random.seed(local_seed)

    if FLAGS.enable_ce:
        # Continuous-evaluation mode: fixed seeds for reproducibility.
        random.seed(0)
        np.random.seed(0)

    if ParallelEnv().nranks > 1:
        paddle.distributed.init_parallel_env()

    # Data
    dataset = cfg.TrainDataset
    train_loader, step_per_epoch = create('TrainReader')(dataset,
                                                         cfg['worker_num'],
                                                         place)

    # Model
    model = create(cfg.architecture)

    # Optimizer
    lr = create('LearningRate')(step_per_epoch)
    optimizer = create('OptimizerBuilder')(lr, model.parameters())

    # Init Model & Optimzer
    if FLAGS.weight_type == 'resume':
        # Resume restores the optimizer state as well.
        load_weight(model, cfg.pretrain_weights, optimizer)
    else:
        load_pretrain_weight(model, cfg.pretrain_weights,
                             cfg.get('load_static_weights', False),
                             FLAGS.weight_type)

    # sync_bn is only meaningful with multiple GPUs.
    if getattr(model.backbone, 'norm_type', None) == 'sync_bn':
        assert cfg.use_gpu and ParallelEnv(
        ).nranks > 1, 'you should use bn rather than sync_bn while using a single gpu'
    # sync_bn = (getattr(model.backbone, 'norm_type', None) == 'sync_bn' and
    #            cfg.use_gpu and ParallelEnv().nranks > 1)
    # if sync_bn:
    #     model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    # Parallel Model
    if ParallelEnv().nranks > 1:
        model = paddle.DataParallel(model)

    fields = train_loader.collate_fn.output_fields
    # Run Train
    time_stat = deque(maxlen=cfg.log_iter)
    start_time = time.time()
    end_time = time.time()
    # Run Train
    # The epoch to resume from is recovered from the LR scheduler state
    # restored by load_weight (0 for a fresh run).
    start_epoch = optimizer.state_dict()['LR_Scheduler']['last_epoch']
    for epoch_id in range(int(cfg.epoch)):
        cur_eid = epoch_id + start_epoch
        train_loader.dataset.epoch = cur_eid
        for iter_id, data in enumerate(train_loader):
            start_time = end_time
            end_time = time.time()
            time_stat.append(end_time - start_time)
            time_cost = np.mean(time_stat)
            # ETA from the smoothed per-iter cost over remaining steps.
            eta_sec = (
                (cfg.epoch - cur_eid) * step_per_epoch - iter_id) * time_cost
            eta = str(datetime.timedelta(seconds=int(eta_sec)))

            # Model Forward
            model.train()
            outputs = model(data, fields, 'train')

            # Model Backward
            loss = outputs['loss']
            if ParallelEnv().nranks > 1:
                # Scale loss and aggregate gradients across trainers.
                loss = model.scale_loss(loss)
                loss.backward()
                model.apply_collective_grads()
            else:
                loss.backward()
            optimizer.step()
            curr_lr = optimizer.get_lr()
            lr.step()
            optimizer.clear_grad()

            # Only rank 0 logs.
            if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
                # Log state
                if epoch_id == 0 and iter_id == 0:
                    # Created lazily: the stat keys come from the first
                    # forward pass's outputs.
                    train_stats = TrainingStats(cfg.log_iter, outputs.keys())
                train_stats.update(outputs)
                logs = train_stats.log()
                if iter_id % cfg.log_iter == 0:
                    ips = float(cfg['TrainReader']['batch_size']) / time_cost
                    strs = 'Epoch:{}: iter: {}, lr: {:.6f}, {}, eta: {}, batch_cost: {:.5f} sec, ips: {:.5f} images/sec'.format(
                        cur_eid, iter_id, curr_lr, logs, eta, time_cost, ips)
                    logger.info(strs)

        # Save Stage
        # Checkpoint every snapshot_epoch epochs and at the final epoch.
        if ParallelEnv().local_rank == 0 and (cur_eid % cfg.snapshot_epoch == 0
                                              or
                                              (cur_eid + 1) == int(cfg.epoch)):
            cfg_name = os.path.basename(FLAGS.config).split('.')[0]
            save_name = str(cur_eid) if cur_eid + 1 != int(
                cfg.epoch) else "model_final"
            save_dir = os.path.join(cfg.save_dir, cfg_name)
            save_model(model, optimizer, save_dir, save_name)
 def load_weights_sde(self, det_weights, reid_weights):
     """Load SDE weights: the detector's when one is configured, and the
     ReID model's in every case."""
     if self.model.detector:
         load_weight(self.model.detector, det_weights)
     load_weight(self.model.reid, reid_weights)
Exemple #10
0
def run(FLAGS, cfg, place):
    """Run inference on images and save visualized detection results.

    Images come from ``FLAGS.infer_dir`` / ``FLAGS.infer_img``. For each
    batch the raw outputs are post-processed, converted to COCO-style
    results, drawn onto the source image and written to
    ``FLAGS.output_dir``. Frames are optionally logged to VisualDL.
    """
    # Model
    model = create(cfg.architecture)

    # data
    dataset = cfg.TestDataset
    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)
    test_loader = create('TestReader')(dataset, cfg['worker_num'])
    # Inputs needed by post-processing / visualization alongside preds.
    extra_key = ['im_shape', 'scale_factor', 'im_id']

    # TODO: support other metrics
    imid2path = dataset.get_imid2path()

    anno_file = dataset.get_anno()
    with_background = cfg.with_background
    use_default_label = dataset.use_default_label

    # NOTE(review): get_category_info is only imported for COCO/VOC; any
    # other metric raises NameError below -- confirm intended.
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import get_category_info
    if cfg.metric == 'VOC':
        from ppdet.utils.voc_eval import get_category_info
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    # Init Model
    load_weight(model, cfg.weights)

    # Run Infer
    model.eval()  # inference mode; hoisted out of the loop
    for iter_id, data in enumerate(test_loader):
        # forward
        outs = model(data)
        for key in extra_key:
            outs[key] = data[key]
        for key, value in outs.items():
            outs[key] = value.numpy()

        if 'mask' in outs and 'bbox' in outs:
            # Paste predicted masks back to the original image scale.
            mask_resolution = model.mask_post_process.mask_resolution
            from ppdet.py_op.post_process import mask_post_process
            outs['mask'] = mask_post_process(outs, outs['im_shape'],
                                             outs['scale_factor'],
                                             mask_resolution)

        eval_type = []
        if 'bbox' in outs:
            eval_type.append('bbox')
        if 'mask' in outs:
            eval_type.append('mask')

        batch_res = get_infer_results([outs], eval_type, clsid2catid)
        logger.info('Infer iter {}'.format(iter_id))
        bbox_res = None
        mask_res = None

        # Results are flattened over the batch; bbox_num gives the
        # per-image slice boundaries.
        bbox_num = outs['bbox_num']
        start = 0
        for i, im_id in enumerate(outs['im_id']):
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            end = start + bbox_num[i]

            # use VisualDL to log original image
            # NOTE(review): vdl_writer / vdl_image_step / vdl_image_frame
            # are not defined in this function; the += below needs them
            # declared global to work -- confirm against caller.
            if FLAGS.use_vdl:
                original_image_np = np.array(image)
                vdl_writer.add_image(
                    "original/frame_{}".format(vdl_image_frame),
                    original_image_np, vdl_image_step)

            if 'bbox' in batch_res:
                bbox_res = batch_res['bbox'][start:end]
            if 'mask' in batch_res:
                mask_res = batch_res['mask'][start:end]

            # BUGFIX: pass the current image id, not the whole batch
            # array outs['im_id'] (int() on a multi-element array is
            # wrong/raises).
            image = visualize_results(image, bbox_res, mask_res,
                                      int(im_id), catid2name,
                                      FLAGS.draw_threshold)

            # use VisualDL to log image with bbox
            if FLAGS.use_vdl:
                infer_image_np = np.array(image)
                vdl_writer.add_image("bbox/frame_{}".format(vdl_image_frame),
                                     infer_image_np, vdl_image_step)
                vdl_image_step += 1
                if vdl_image_step % 10 == 0:
                    vdl_image_step = 0
                    vdl_image_frame += 1

            # save image with detection
            save_name = get_save_image_name(FLAGS.output_dir, image_path)
            logger.info("Detection bbox results save in {}".format(save_name))
            image.save(save_name, quality=95)
            start = end
Exemple #11
0
 def load_weights_jde(self, weights):
     """Resume JDE model weights together with the optimizer state."""
     load_weight(self.model, weights, self.optimizer)
Exemple #12
0
def run(FLAGS, cfg, place):
    """Run inference on test images and save visualized results.

    Images come from ``FLAGS.infer_dir`` / ``FLAGS.infer_img``. Each
    prediction batch is converted to COCO-style results, drawn onto the
    source image and saved to ``FLAGS.output_dir``; frames are
    optionally logged to VisualDL.
    """
    # Model
    model = create(cfg.architecture)

    # data
    dataset = cfg.TestDataset
    test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
    dataset.set_images(test_images)
    test_loader, _ = create('TestReader')(dataset, cfg['worker_num'], place)

    # TODO: support other metrics
    imid2path = dataset.get_imid2path()

    from ppdet.utils.coco_eval import get_category_info
    anno_file = dataset.get_anno()
    with_background = cfg.with_background
    use_default_label = dataset.use_default_label
    clsid2catid, catid2name = get_category_info(anno_file, with_background,
                                                use_default_label)

    # Init Model
    load_weight(model, cfg.weights)

    # Run Infer
    model.eval()  # inference mode; hoisted out of the loop
    for iter_id, data in enumerate(test_loader):
        # forward
        outs = model(data, cfg.TestReader['inputs_def']['fields'], 'infer')

        batch_res = get_infer_results([outs], outs.keys(), clsid2catid)
        logger.info('Infer iter {}'.format(iter_id))
        bbox_res = None
        mask_res = None

        # Results are flattened over the batch; bbox_num gives the
        # per-image slice boundaries.
        im_ids = outs['im_id']
        bbox_num = outs['bbox_num']
        start = 0
        for i, im_id in enumerate(im_ids):
            image_path = imid2path[int(im_id)]
            image = Image.open(image_path).convert('RGB')
            end = start + bbox_num[i]

            # use VisualDL to log original image
            # NOTE(review): vdl_writer / vdl_image_step / vdl_image_frame
            # are not defined in this function; the += below needs them
            # declared global to work -- confirm against caller.
            if FLAGS.use_vdl:
                original_image_np = np.array(image)
                vdl_writer.add_image(
                    "original/frame_{}".format(vdl_image_frame),
                    original_image_np, vdl_image_step)

            if 'bbox' in batch_res:
                bbox_res = batch_res['bbox'][start:end]
            if 'mask' in batch_res:
                mask_res = batch_res['mask'][start:end]

            image = visualize_results(image, bbox_res, mask_res, int(im_id),
                                      catid2name, FLAGS.draw_threshold)

            # use VisualDL to log image with bbox
            if FLAGS.use_vdl:
                infer_image_np = np.array(image)
                vdl_writer.add_image("bbox/frame_{}".format(vdl_image_frame),
                                     infer_image_np, vdl_image_step)
                vdl_image_step += 1
                if vdl_image_step % 10 == 0:
                    vdl_image_step = 0
                    vdl_image_frame += 1

            # save image with detection
            save_name = get_save_image_name(FLAGS.output_dir, image_path)
            logger.info("Detection bbox results save in {}".format(save_name))
            image.save(save_name, quality=95)
            start = end
Exemple #13
0
def run(FLAGS, cfg, place):
    """Train a detection model (smoothed-stats logging variant).

    Seeds per-trainer RNGs in distributed mode, builds the reader /
    model / optimizer, loads resume or pretrain weights, freezes the
    early backbone stages, then runs the epoch loop with smoothed
    timing stats and per-epoch checkpointing.
    """
    env = os.environ
    # Distributed launch is detected from the Paddle launcher env vars.
    FLAGS.dist = 'PADDLE_TRAINER_ID' in env and 'PADDLE_TRAINERS_NUM' in env
    if FLAGS.dist:
        trainer_id = int(env['PADDLE_TRAINER_ID'])
        # Per-trainer seed so workers do not draw identical samples.
        local_seed = (99 + trainer_id)
        random.seed(local_seed)
        np.random.seed(local_seed)

    if FLAGS.enable_ce:
        # Continuous-evaluation mode: fixed seeds for reproducibility.
        random.seed(0)
        np.random.seed(0)

    if ParallelEnv().nranks > 1:
        paddle.distributed.init_parallel_env()

    # Data
    datasets = cfg.TrainDataset
    train_loader = create('TrainReader')(datasets, cfg['worker_num'])
    steps = len(train_loader)

    # Model
    model = create(cfg.architecture)

    # Optimizer
    lr = create('LearningRate')(steps)
    optimizer = create('OptimizerBuilder')(lr, model.parameters())

    # Init Model & Optimzer
    start_epoch = 0
    if FLAGS.weight_type == 'resume':
        # Resume restores optimizer state and returns the saved epoch.
        start_epoch = load_weight(model, cfg.pretrain_weights, optimizer)
    else:
        load_pretrain_weight(model, cfg.pretrain_weights,
                             cfg.get('load_static_weights', False),
                             FLAGS.weight_type)

    # sync_bn is only meaningful with multiple GPUs.
    if getattr(model.backbone, 'norm_type', None) == 'sync_bn':
        assert cfg.use_gpu and ParallelEnv(
        ).nranks > 1, 'you should use bn rather than sync_bn while using a single gpu'
    # sync_bn = (getattr(model.backbone, 'norm_type', None) == 'sync_bn' and
    #            cfg.use_gpu and ParallelEnv().nranks > 1)
    # if sync_bn:
    #     model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model)

    # The parameter filter is temporary fix for training because of #28997
    # in Paddle.
    def no_grad(param):
        # Freeze conv1 and the res2 stage; returns None otherwise,
        # which filter() treats as False.
        if param.name.startswith("conv1_") or param.name.startswith("res2a_") \
            or param.name.startswith("res2b_") or param.name.startswith("res2c_"):
            return True

    for param in filter(no_grad, model.parameters()):
        param.stop_gradient = True

    # Parallel Model
    if ParallelEnv().nranks > 1:
        model = paddle.DataParallel(model)

    cfg_name = os.path.basename(FLAGS.config).split('.')[0]
    save_dir = os.path.join(cfg.save_dir, cfg_name)

    # Run Train
    end_epoch = int(cfg.epoch)
    batch_size = int(cfg['TrainReader']['batch_size'])
    total_steps = (end_epoch - start_epoch) * steps
    step_id = 0

    train_stats = stats.TrainingStats(cfg.log_iter)
    batch_time = stats.SmoothedValue(fmt='{avg:.4f}')
    data_time = stats.SmoothedValue(fmt='{avg:.4f}')

    end_time = time.time()
    # Pad the iter counter so successive log lines align.
    space_fmt = ':' + str(len(str(steps))) + 'd'
    # Run Train
    for cur_eid in range(start_epoch, end_epoch):
        datasets.set_epoch(cur_eid)
        for iter_id, data in enumerate(train_loader):
            data_time.update(time.time() - end_time)
            # Model Forward
            model.train()
            outputs = model(data, mode='train')
            loss = outputs['loss']
            # Model Backward
            loss.backward()
            optimizer.step()
            # NOTE(review): curr_lr is never used in this variant's log
            # line -- confirm whether it was meant to be reported.
            curr_lr = optimizer.get_lr()
            lr.step()
            optimizer.clear_grad()

            batch_time.update(time.time() - end_time)
            # Only rank 0 logs.
            if ParallelEnv().nranks < 2 or ParallelEnv().local_rank == 0:
                train_stats.update(outputs)
                logs = train_stats.log()
                if iter_id % cfg.log_iter == 0:
                    # ETA from the global average batch time over the
                    # remaining steps.
                    eta_sec = (total_steps - step_id) * batch_time.global_avg
                    eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
                    ips = float(batch_size) / batch_time.avg
                    fmt = ' '.join([
                        'Epoch: [{}]',
                        '[{' + space_fmt + '}/{}]',
                        '{meters}',
                        'eta: {eta}',
                        'batch_cost: {btime}',
                        'data_cost: {dtime}',
                        'ips: {ips:.4f} images/s',
                    ])
                    fmt = fmt.format(cur_eid,
                                     iter_id,
                                     steps,
                                     meters=logs,
                                     eta=eta_str,
                                     btime=str(batch_time),
                                     dtime=str(data_time),
                                     ips=ips)
                    logger.info(fmt)
            step_id += 1
            end_time = time.time()  # after copy outputs to CPU.
        # Save Stage
        # Checkpoint every snapshot_epoch epochs and at the final epoch.
        if (ParallelEnv().local_rank == 0 and \
            (cur_eid % cfg.snapshot_epoch) == 0) or (cur_eid + 1) == end_epoch:
            save_name = str(
                cur_eid) if cur_eid + 1 != end_epoch else "model_final"
            save_model(model, optimizer, save_dir, save_name, cur_eid + 1)
def run(FLAGS, cfg, place):
    """Evaluate a detection model with COCO or VOC metrics.

    Runs inference over the EvalReader (attaching extra input keys the
    post-processing needs), logs throughput, then converts the outputs
    to the metric-specific result format and evaluates.
    """
    # Model
    model = create(cfg.architecture)

    # Init Model
    load_weight(model, cfg.weights)

    # Data Reader
    dataset = cfg.EvalDataset
    eval_loader = create('EvalReader')(dataset, cfg['worker_num'])
    # Inputs needed by post-processing / metrics alongside predictions.
    extra_key = ['im_shape', 'scale_factor', 'im_id']
    if cfg.metric == 'VOC':
        extra_key += ['gt_bbox', 'gt_class', 'difficult']

    # Run Eval
    outs_res = []
    sample_num = 0
    start_time = time.time()
    model.eval()  # inference mode; hoisted out of the loop
    for iter_id, data in enumerate(eval_loader):
        # forward
        outs = model(data, mode='infer')
        for key in extra_key:
            outs[key] = data[key]
        for key, value in outs.items():
            outs[key] = value.numpy()

        if 'mask' in outs and 'bbox' in outs:
            # Paste predicted masks back to the original image scale.
            mask_resolution = model.mask_post_process.mask_resolution
            from ppdet.py_op.post_process import mask_post_process
            outs['mask'] = mask_post_process(outs, outs['im_shape'],
                                             outs['scale_factor'],
                                             mask_resolution)

        outs_res.append(outs)
        # log
        sample_num += outs['im_id'].shape[0]
        if iter_id % 100 == 0:
            logger.info("Eval iter: {}".format(iter_id))

    cost_time = time.time() - start_time
    logger.info('Total sample number: {}, averge FPS: {}'.format(
        sample_num, sample_num / cost_time))

    # eval_type reflects the keys present in the LAST batch's outputs.
    eval_type = []
    if 'bbox' in outs:
        eval_type.append('bbox')
    if 'mask' in outs:
        eval_type.append('mask')
    # Metric
    # TODO: support other metric
    with_background = cfg.with_background
    use_default_label = dataset.use_default_label
    # NOTE(review): a metric other than COCO/VOC leaves infer_res
    # undefined and raises NameError below -- confirm intended.
    if cfg.metric == 'COCO':
        from ppdet.utils.coco_eval import get_category_info
        clsid2catid, catid2name = get_category_info(dataset.get_anno(),
                                                    with_background,
                                                    use_default_label)

        infer_res = get_infer_results(outs_res, eval_type, clsid2catid)

    elif cfg.metric == 'VOC':
        from ppdet.utils.voc_eval import get_category_info
        clsid2catid, catid2name = get_category_info(dataset.get_label_list(),
                                                    with_background,
                                                    use_default_label)
        infer_res = outs_res

    eval_results(infer_res, cfg.metric, dataset)
Exemple #15
0
 def load_weights_sde(self, det_weights, reid_weights):
     """Resume SDE weights with optimizer state: detector weights when a
     detector is configured, ReID weights always."""
     detector = self.model.detector
     if detector:
         load_weight(detector, det_weights, self.optimizer)
     load_weight(self.model.reid, reid_weights, self.optimizer)