Example #1
def main(args):
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
    cfg = Config(args.cfg)
    net = cfg.model

    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully.')

    net.forward = paddle.jit.to_static(net.forward)
    in_shape = [1] + list(cfg.val_dataset[0][0].shape)
    in_var = paddle.ones(in_shape)
    out = net(in_var)
    save_path = os.path.join(args.save_dir, 'model')
    paddle.jit.save(net, save_path, input_spec=[in_var])

    yml_file = os.path.join(args.save_dir, 'deploy.yaml')
    with open(yml_file, 'w') as file:
        transforms = cfg.dic['val_dataset']['transforms']
        data = {
            'Deploy': {
                'transforms': transforms,
                'model': 'model.pdmodel',
                'params': 'model.pdiparams'
            }
        }
        yaml.dump(data, file)

    logger.info(f'Model is saved in {args.save_dir}.')
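
Note: the snippets on this page omit their CLI setup. A minimal argparse wrapper that would satisfy the attributes this main() reads (cfg, model_path, save_dir) might look as follows; the flag names are assumptions inferred from the snippet, not taken from the original script.

# Hypothetical CLI wrapper -- flag names inferred from the attributes
# main() reads; they are assumptions, not the original definitions.
import argparse

def parse_args():
    parser = argparse.ArgumentParser(description='Export a trained model')
    parser.add_argument('--cfg', type=str, help='Path to the config file')
    parser.add_argument('--model_path', type=str,
                        help='Trained weights (.pdparams)')
    parser.add_argument('--save_dir', type=str, default='./output',
                        help='Directory for the exported model')
    return parser.parse_args()

if __name__ == '__main__':
    main(parse_args())
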
Example #2
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_config = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    predict(model,
            model_path=args.model_path,
            val_dataset=val_dataset,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir,
            **test_config)
Example #3
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        model.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully')
    evaluate(model, val_dataset)
Example #4
    def init_weight(self):
        if self.pretrained is not None:
            para_state_dict = paddle.load(self.pretrained)
            model_state_dict = self.backbone.state_dict()
            keys = model_state_dict.keys()
            num_params_loaded = 0
            for k in keys:
                k_parts = k.split('.')
                torchkey = 'backbone.' + k
                if k_parts[1] == 'layer5':
                    logger.warning("{} should not be loaded".format(k))
                elif torchkey not in para_state_dict:
                    logger.warning("{} is not in pretrained model".format(k))
                elif list(para_state_dict[torchkey].shape) != list(
                        model_state_dict[k].shape):
                    logger.warning(
                        "[SKIP] Shape of pretrained params {} doesn't match. (Pretrained: {}, Actual: {})"
                        .format(k, para_state_dict[torchkey].shape,
                                model_state_dict[k].shape))
                else:
                    model_state_dict[k] = para_state_dict[torchkey]
                    num_params_loaded += 1
            self.backbone.set_dict(model_state_dict)
            logger.info("There are {}/{} variables loaded into {}.".format(
                num_params_loaded, len(model_state_dict),
                self.backbone.__class__.__name__))
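
Note: the pattern above (prefix remapping plus a shape check before set_dict) generalizes to any partial weight transfer. A minimal standalone sketch, assuming only the paddle API; the function name and prefix argument are illustrative.

# Minimal sketch of shape-checked partial weight loading (illustrative,
# not the original helper).
import paddle

def load_matching_params(layer, pretrained_path, prefix=''):
    loaded = paddle.load(pretrained_path)
    current = layer.state_dict()
    matched = 0
    for k, v in current.items():
        src_key = prefix + k  # remap, e.g. 'backbone.' + k
        if src_key in loaded and list(loaded[src_key].shape) == list(v.shape):
            current[k] = loaded[src_key]
            matched += 1
    layer.set_dict(current)
    return matched, len(current)
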
Example #5
def main(args):
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
    cfg = Config(args.cfg)
    net = cfg.model

    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully.')

    net.eval()
    net = paddle.jit.to_static(net,
                               input_spec=[
                                   paddle.static.InputSpec(
                                       shape=[None, 3, None, None],
                                       dtype='float32')
                               ])
    save_path = os.path.join(args.save_dir, 'model')
    paddle.jit.save(net, save_path)

    yml_file = os.path.join(args.save_dir, 'deploy.yaml')
    with open(yml_file, 'w') as file:
        transforms = cfg.export_config.get('transforms', [{
            'type': 'Normalize'
        }])
        data = {
            'Deploy': {
                'transforms': transforms,
                'model': 'model.pdmodel',
                'params': 'model.pdiparams'
            }
        }
        yaml.dump(data, file)

    logger.info(f'Model is saved in {args.save_dir}.')
Example #6
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = Compose(cfg.val_transforms)
    print(transforms)

    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_config = get_test_config(cfg, args)

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir,
            **test_config)
Example #7
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms
    image_list, image_dir = get_image_list(args.image_path)

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir)
Example #8
def main(args):

    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
                     ['-' * 48])
    logger.info(info)

    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)

    train_dataset = cfg.train_dataset
    if train_dataset is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    elif len(train_dataset) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )
    val_dataset = cfg.val_dataset if args.do_eval else None
    losses = cfg.loss

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset)

    train(
        cfg.model,
        train_dataset,
        val_dataset=val_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=args.resume_model,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        losses=losses,
        keep_checkpoint_max=args.keep_checkpoint_max)
Example #9
    def training(self, epoch, log_iters=10, save_epoch=1):

        if self.cfg.distributed:
            self.train_sample.set_epoch(epoch)

        for metric in self.train_metrics:
            metric.reset_epoch_stats()
        log_prefix = 'Train' + self.task_prefix.capitalize()

        self.net.train()
        train_loss = 0.0
        for i, batch_data in enumerate(self.train_data):
            global_step = epoch * len(self.train_data) + i
            loss, losses_logging, splitted_batch_data, outputs = \
                self.batch_forward(batch_data)

            self.optim[0].clear_grad()
            self.optim[1].clear_grad()
            loss.backward()
            self.optim[0].step()
            self.optim[1].step()
            losses_logging['overall'] = loss
            losses_logging = reduce_loss_dict(losses_logging)
            train_loss += losses_logging['overall']
            lr = self.optim[0].get_lr()

            if self.is_master:
                if global_step % log_iters == 0:
                    logger.info('Epoch={}, Step={}, loss={:.4f}, lr={}'.format(
                        epoch, global_step, float(loss), lr))
                    for loss_name, loss_value in losses_logging.items():

                        self.sw.add_scalar(f'{log_prefix}Losses/{loss_name}',
                                           loss_value.numpy(), global_step)
                        self.sw.add_scalar('Train/lr', lr, global_step)
                    for metric in self.train_metrics:
                        metric.log_states(
                            self.sw, f'{log_prefix}Metrics/{metric.name}',
                            global_step)

        if self.is_master:
            if isinstance(self.checkpoint_interval, (list, tuple)):
                checkpoint_interval = [
                    x for x in self.checkpoint_interval if x[0] <= epoch
                ][-1][1]
            else:
                checkpoint_interval = self.checkpoint_interval

            if epoch % checkpoint_interval == 0:
                print('saving model .........')
                save_checkpoint(self.net,
                                self.cfg.CHECKPOINTS_PATH,
                                prefix=self.task_prefix,
                                epoch=epoch)
                print('finish save model!')
Example #10
    def _init_cpu_config(self):
        """
        Init the config for x86 CPU.
        """
        logger.info("Use CPU")
        self.pred_cfg.disable_gpu()
        if self.args.enable_mkldnn:
            logger.info("Use MKLDNN")
            # cache 10 different input shapes for MKLDNN
            self.pred_cfg.set_mkldnn_cache_capacity(10)
            self.pred_cfg.enable_mkldnn()
        self.pred_cfg.set_cpu_math_library_num_threads(self.args.cpu_threads)
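
Note: self.pred_cfg above is a paddle.inference.Config. A sketch of how such a config and its predictor are typically created; the file paths and thread count are illustrative.

# Building the inference config that _init_cpu_config mutates
# (paths are illustrative).
from paddle.inference import Config, create_predictor

pred_cfg = Config('output/model.pdmodel', 'output/model.pdiparams')
pred_cfg.disable_gpu()                        # CPU inference
pred_cfg.set_cpu_math_library_num_threads(4)  # cf. --cpu_threads
predictor = create_predictor(pred_cfg)
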
Example #11
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    cfg_1 = Config(args.cfg_1)
    cfg_crop = Config(args.cfg_crop)
    val_dataset_crop = cfg_crop.val_dataset

    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    model_1 = cfg_1.model
    model_crop = cfg_crop.model
    transforms = val_dataset.transforms
    transforms_crop = val_dataset_crop.transforms
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))
    predictEnsembleThree(
        model,
        model_1,
        model_crop,
        model_path=args.model_path,
        model_path_1=args.model_path_1,
        model_path_crop=args.model_path_crop,
        transforms=transforms,
        transforms_crop=transforms_crop,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        aug_pred=args.aug_pred,
        scales=args.scales,
        flip_horizontal=args.flip_horizontal,
        flip_vertical=args.flip_vertical,
        is_slide=args.is_slide,
        crop_size=args.crop_size,
        stride=args.stride,
    )
Example #12
    def _init_gpu_config(self):
        """
        Init the config for nvidia gpu.
        """
        logger.info("Use GPU")
        self.pred_cfg.enable_use_gpu(100, 0)
        precision_map = {
            "fp16": PrecisionType.Half,
            "fp32": PrecisionType.Float32,
            "int8": PrecisionType.Int8
        }
        precision_mode = precision_map[self.args.precision]

        if self.args.use_trt:
            logger.info("Use TRT")
            self.pred_cfg.enable_tensorrt_engine(workspace_size=1 << 30,
                                                 max_batch_size=1,
                                                 min_subgraph_size=50,
                                                 precision_mode=precision_mode,
                                                 use_static=False,
                                                 use_calib_mode=False)

            if use_auto_tune(self.args) and \
                os.path.exists(self.args.auto_tuned_shape_file):
                logger.info("Use auto tuned dynamic shape")
                allow_build_at_runtime = True
                self.pred_cfg.enable_tuned_tensorrt_dynamic_shape(
                    self.args.auto_tuned_shape_file, allow_build_at_runtime)
            else:
                logger.info("Use manual set dynamic shape")
                min_input_shape = {"x": [1, 3, 100, 100]}
                max_input_shape = {"x": [1, 3, 2000, 3000]}
                opt_input_shape = {"x": [1, 3, 512, 1024]}
                self.pred_cfg.set_trt_dynamic_shape_info(
                    min_input_shape, max_input_shape, opt_input_shape)
Example #13
    def run_dataset(self):
        """
        Read the data from dataset and calculate the accurary of the inference model.
        """
        dataset = get_dataset(self.args)

        input_names = self.predictor.get_input_names()
        input_handle = self.predictor.get_input_handle(input_names[0])
        output_names = self.predictor.get_output_names()
        output_handle = self.predictor.get_output_handle(output_names[0])

        intersect_area_all = 0
        pred_area_all = 0
        label_area_all = 0
        total_time = 0
        progbar_val = progbar.Progbar(target=len(dataset), verbose=1)

        for idx, (img, label) in enumerate(dataset):
            data = np.array([img])
            input_handle.reshape(data.shape)
            input_handle.copy_from_cpu(data)

            start_time = time.time()
            self.predictor.run()
            end_time = time.time()
            total_time += (end_time - start_time)

            pred = output_handle.copy_to_cpu()
            pred = self._postprocess(pred)
            pred = paddle.to_tensor(pred, dtype='int64')
            label = paddle.to_tensor(label, dtype="int32")
            if pred.shape != label.shape:
                label = paddle.unsqueeze(label, 0)
                label = F.interpolate(label, pred.shape[-2:])
                label = paddle.squeeze(label, 0)

            intersect_area, pred_area, label_area = metrics.calculate_area(
                pred,
                label,
                dataset.num_classes,
                ignore_index=dataset.ignore_index)

            intersect_area_all = intersect_area_all + intersect_area
            pred_area_all = pred_area_all + pred_area
            label_area_all = label_area_all + label_area

            progbar_val.update(idx + 1)

        class_iou, miou = metrics.mean_iou(intersect_area_all, pred_area_all,
                                           label_area_all)
        class_acc, acc = metrics.accuracy(intersect_area_all, pred_area_all)
        kappa = metrics.kappa(intersect_area_all, pred_area_all, label_area_all)

        logger.info(
            "[EVAL] #Images: {} mIoU: {:.4f} Acc: {:.4f} Kappa: {:.4f} ".format(
                len(dataset), miou, acc, kappa))
        logger.info("[EVAL] Class IoU: \n" + str(np.round(class_iou, 4)))
        logger.info("[EVAL] Class Acc: \n" + str(np.round(class_acc, 4)))
        logger.info("[EVAL] Average time: %.3f ms/img" %
                    (total_time / len(dataset)) * 1000)
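
Note: the metrics helpers used above come from paddleseg.utils.metrics. A small self-contained check of the mIoU path, assuming that module keeps the signatures used here.

# Sanity check of calculate_area / mean_iou on a 2x2 toy example
# (assumes paddleseg.utils.metrics signatures as used above).
import paddle
from paddleseg.utils import metrics

pred = paddle.to_tensor([[[0, 1], [1, 1]]], dtype='int64')   # [N, H, W]
label = paddle.to_tensor([[[0, 1], [0, 1]]], dtype='int32')
inter, pred_area, label_area = metrics.calculate_area(pred, label,
                                                      num_classes=2)
class_iou, miou = metrics.mean_iou(inter, pred_area, label_area)
print(class_iou, miou)
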
Example #14
def load_ema_model(model, resume_model):
    if resume_model is not None:
        logger.info('Load ema model from {}'.format(resume_model))
        if os.path.exists(resume_model):
            resume_model = os.path.normpath(resume_model)
            ckpt_path = os.path.join(resume_model, 'model.pdparams')
            para_state_dict = paddle.load(ckpt_path)
            model.set_state_dict(para_state_dict)
        else:
            raise ValueError(
                'Directory of the model needed to resume is not found: {}'.
                format(resume_model))
    else:
        logger.info('No model needed to resume.')
Example #15
def analyze(args):
    env_info = get_sys_env()
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
                     ['-' * 48])
    logger.info(info)

    paddle.set_device('cpu')

    cfg = Config(args.config)

    custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}
    inputs = paddle.randn(args.input_size)
    _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)
Example #16
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    # The NHWC data format is only supported for the DeepLabV3+ model.
    if args.data_format == 'NHWC':
        if cfg.dic['model']['type'] != 'DeepLabV3P':
            raise ValueError(
                'The "NHWC" data format only supports the DeepLabV3P model!')
        cfg.dic['model']['data_format'] = args.data_format
        cfg.dic['model']['backbone']['data_format'] = args.data_format
        loss_len = len(cfg.dic['loss']['types'])
        for i in range(loss_len):
            cfg.dic['loss']['types'][i]['data_format'] = args.data_format

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_config = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    evaluate(model,
             val_dataset,
             num_workers=args.num_workers,
             is_view=args.is_view,
             save_dir=args.save_dir,
             **test_config)
Example #17
def skip_quant(model):
    """
    If the model has backbone and head, we skip quantizing the conv2d and linear ops
    that belongs the head.
    """
    if not hasattr(model, 'backbone'):
        logger.info("Quantize all target ops")
        return

    logger.info("Quantize all target ops in backbone")
    for name, cur_layer in model.named_sublayers():
        if isinstance(cur_layer, (paddle.nn.Conv2D, paddle.nn.Linear)) \
            and "backbone" not in name:
            cur_layer.skip_quant = True
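
Note: a sketch of where skip_quant would sit in a PaddleSlim QAT flow. The quant config values are assumptions; paddleslim skips layers whose skip_quant attribute is True.

# Assumed usage with PaddleSlim QAT (config values are illustrative).
from paddleslim import QAT

quant_config = {
    'weight_quantize_type': 'channel_wise_abs_max',
    'activation_quantize_type': 'moving_average_abs_max',
}
skip_quant(model)                  # mark head conv/linear layers
quanter = QAT(config=quant_config)
quanter.quantize(model)            # inserts fake-quant ops in place
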
Example #18
    def validation(self, epoch, log_iters=10, save_epoch=1):
        log_prefix = 'Val' + self.task_prefix.capitalize()

        for metric in self.val_metrics:
            metric.reset_epoch_stats()
        val_loss = 0
        losses_logging = defaultdict(list)
        self.net.eval()

        with paddle.no_grad():
            for i, batch_data in enumerate(self.val_data):
                val_global_step = epoch * len(self.val_data) + i

                loss, batch_losses_logging, splitted_batch_data, outputs = \
                    self.batch_forward(batch_data, validation=True)
                batch_losses_logging['overall'] = loss
                reduce_loss_dict(batch_losses_logging)
                for loss_name, loss_value in batch_losses_logging.items():
                    losses_logging[loss_name].append(loss_value.numpy())

                val_loss += batch_losses_logging['overall'].numpy()

                if self.is_master:
                    logger.info(
                        f'Epoch {epoch}, validation loss: {val_loss[0]/(i + 1):.4f}'
                    )
                    for metric in self.val_metrics:
                        metric.log_states(
                            self.sw, f'{log_prefix}Metrics/{metric.name}',
                            val_global_step)

            if self.is_master:
                for loss_name, loss_values in losses_logging.items():
                    self.sw.add_scalar(f'{log_prefix}Losses/{loss_name}',
                                       np.array(loss_values).mean(), epoch)

                for metric in self.val_metrics:
                    self.sw.add_scalar(f'{log_prefix}Metrics/{metric.name}',
                                       metric.get_epoch_value(), epoch)

                if val_global_step % log_iters == 0 and self.is_master:
                    logger.info('Epoch={}, Step={}, loss={:.4f}'.format(
                        epoch, val_global_step, float(loss)))

                    for metric in self.val_metrics:
                        metric.log_states(
                            self.sw, f'{log_prefix}Metrics/{metric.name}',
                            val_global_step)
Example #19
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)

    if cfg.dic["data"]["target"]["dataset"] == 'cityscapes':
        val_dataset = CityDataset(split='val',
                                  **cfg.dic["data"]["target"]["kwargs"])
    else:
        raise NotImplementedError()

    if len(val_dataset) < 500:
        print(len(val_dataset))
        for i in range(len(val_dataset)):
            print(val_dataset[i])

    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_config = get_test_config(cfg, args)

    val.evaluate(model,
                 val_dataset,
                 num_workers=args.num_workers,
                 **test_config)
Example #20
    def on_iter_end(self, iter, logs=None):

        if iter % self.log_freq == 0 and ParallelEnv().local_rank == 0:
            total_iters = self.params["total_iters"]
            iters_per_epoch = self.params["iters_per_epoch"]
            remaining_iters = total_iters - iter
            eta = self._calculate_eta(remaining_iters, logs["batch_cost"])
            current_epoch = (iter - 1) // iters_per_epoch + 1
            loss = logs["loss"]
            lr = self.optimizer.get_lr()
            batch_cost = logs["batch_cost"]
            reader_cost = logs["reader_cost"]

            logger.info(
                "[TRAIN] epoch={}, iter={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.4f} | ETA {}"
                .format(current_epoch, iter, total_iters, loss, lr, batch_cost,
                        reader_cost, eta))
Example #21
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        model,
        val_dataset,
        aug_eval=args.aug_eval,
        scales=args.scales,
        flip_horizontal=args.flip_horizontal,
        flip_vertical=args.flip_vertical,
        is_slide=args.is_slide,
        crop_size=args.crop_size,
        stride=args.stride,
        num_workers=args.num_workers,
    )
Example #22
    def run(self, imgs, trimaps=None, imgs_dir=None):
        self.imgs_dir = imgs_dir
        num = len(imgs)
        input_names = self.predictor.get_input_names()
        input_handle = {}

        for i in range(len(input_names)):
            input_handle[input_names[i]] = self.predictor.get_input_handle(
                input_names[i])
        output_names = self.predictor.get_output_names()
        output_handle = self.predictor.get_output_handle(output_names[0])
        args = self.args

        for i in tqdm.tqdm(range(0, num, args.batch_size)):
            img_inputs = []
            if trimaps is not None:
                trimap_inputs = []
            trans_info = []
            for j in range(i, min(i + args.batch_size, num)):
                img = imgs[j]
                trimap = trimaps[j] if trimaps is not None else None
                data = self._preprocess(img=img, trimap=trimap)
                img_inputs.append(data['img'])
                if trimaps is not None:
                    trimap_inputs.append(data['trimap'][np.newaxis, :, :])
                trans_info.append(data['trans_info'])
            img_inputs = np.array(img_inputs)
            if trimaps is not None:
                trimap_inputs = (np.array(trimap_inputs)).astype('float32')

            input_handle['img'].copy_from_cpu(img_inputs)
            if trimaps is not None:
                input_handle['trimap'].copy_from_cpu(trimap_inputs)
            self.predictor.run()
            results = output_handle.copy_to_cpu()

            results = results.squeeze(1)
            for j in range(len(trans_info)):
                trimap = trimap_inputs[j] if trimaps is not None else None
                result = self._postprocess(
                    results[j], trans_info[j], trimap=trimap)
                self._save_imgs(result, imgs[i + j])
        logger.info("Finish")
Example #23
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms

    alpha = predict(model,
                    model_path=args.model_path,
                    transforms=transforms,
                    image_list=[args.image_path],
                    trimap_list=[args.trimap_path],
                    save_dir=args.save_dir)

    img_ori = cv2.imread(args.image_path)
    bg = get_bg(args.bg_path, img_ori.shape)
    alpha = alpha / 255
    alpha = alpha[:, :, np.newaxis]
    com = alpha * img_ori + (1 - alpha) * bg
    com = com.astype('uint8')
    com_save_path = os.path.join(args.save_dir,
                                 os.path.basename(args.image_path))
    cv2.imwrite(com_save_path, com)
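
Note: the compositing step follows the standard matting equation, com = alpha * foreground + (1 - alpha) * background, applied per pixel. A numpy-only illustration with toy values:

# Toy composite: alpha blends the foreground over the background per pixel.
import numpy as np

fg = np.full((2, 2, 3), 200, dtype=np.float32)
bg = np.zeros((2, 2, 3), dtype=np.float32)
alpha = np.array([[1.0, 0.5], [0.25, 0.0]], dtype=np.float32)[:, :, None]
com = (alpha * fg + (1 - alpha) * bg).astype('uint8')
print(com[..., 0])  # [[200 100] [ 50   0]]
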
Example #24
    def _load_weights(self, net):
        if self.cfg.weights is not None:
            if os.path.isfile(self.cfg.weights):

                load_weights(net, self.cfg.weights)
                self.cfg.weights = None
            else:
                raise RuntimeError(
                    f"=> no checkpoint found at '{self.cfg.weights}'")
        elif self.cfg.resume_exp is not None:
            checkpoints = list(
                self.cfg.CHECKPOINTS_PATH.glob(
                    f'{self.cfg.resume_prefix}*.pth'))
            assert len(checkpoints) == 1

            checkpoint_path = checkpoints[0]
            logger.info(f'Load checkpoint from path: {checkpoint_path}')
            load_weights(net, str(checkpoint_path))
        return net
Example #25
    def run(self, num_epochs, start_epoch=None, validation=True):
        if start_epoch is None:
            start_epoch = self.cfg.start_epoch

        logger.info(f'Starting Epoch: {start_epoch}')
        logger.info(f'Total Epochs: {num_epochs}')
        for epoch in range(start_epoch, num_epochs):

            self.training(epoch)

            if isinstance(self.optim[0]._learning_rate,
                          paddle.optimizer.lr.LRScheduler):
                self.optim[0]._learning_rate.step()
            if isinstance(self.optim[1]._learning_rate,
                          paddle.optimizer.lr.LRScheduler):
                self.optim[1]._learning_rate.step()

            if validation:
                self.validation(epoch)
Example #26
def prepare_envs(args):
    """
    Set random seed and the device.
    """
    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    info = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    info = '\n'.join(['', format('Environment Information', '-^48s')] + info +
                     ['-' * 48])
    logger.info(info)

    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
Example #27
def main(args):
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'
    cfg = Config(args.cfg)

    net = cfg.model
    net.eval()
    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully.')

    if args.input_shape is None:
        shape = [None, 3, None, None]
    else:
        shape = args.input_shape

    input_spec = [{"img": paddle.static.InputSpec(shape=shape, name='img')}]
    if args.trimap:
        shape[1] = 1
        input_spec[0]['trimap'] = paddle.static.InputSpec(shape=shape,
                                                          name='trimap')

    net = paddle.jit.to_static(net, input_spec=input_spec)
    save_path = os.path.join(args.save_dir, 'model')
    paddle.jit.save(net, save_path)

    yml_file = os.path.join(args.save_dir, 'deploy.yaml')
    with open(yml_file, 'w') as file:
        transforms = cfg.val_dataset_config.get('transforms',
                                                [{
                                                    'type': 'Normalize'
                                                }])
        data = {
            'Deploy': {
                'transforms': transforms,
                'model': 'model.pdmodel',
                'params': 'model.pdiparams'
            }
        }
        yaml.dump(data, file)

    logger.info(f'Model is saved in {args.save_dir}.')
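
Note: for a quick smoke test, the exported module can be reloaded with paddle.jit.load; a usage sketch with an illustrative path and input shape.

# Reload the exported static-graph model (path and shape illustrative).
import paddle

loaded = paddle.jit.load('output/model')  # reads model.pdmodel/.pdiparams
loaded.eval()
dummy = paddle.ones([1, 3, 512, 512], dtype='float32')
print(loaded(dummy).shape)
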
Example #28
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms

    image_list, image_dir = get_image_list(args.image_path)
    if args.trimap_path is None:
        trimap_list = None
    else:
        trimap_list, _ = get_image_list(args.trimap_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            image_list=image_list,
            image_dir=image_dir,
            trimap_list=trimap_list,
            save_dir=args.save_dir)
Example #29
def evaluate(model, eval_dataset=None, iter_id=None):
    model.eval()

    total_iters = len(eval_dataset)
    conf_mat = ConfusionMatrix(eval_dataset.num_classes, streaming=True)

    logger.info(
        "Start evaluating (total_samples={}, total_iters={})...".format(
            len(eval_dataset), total_iters))
    timer = Timer()
    timer.start()
    for iter, (im, im_info, label) in tqdm.tqdm(enumerate(eval_dataset),
                                                total=total_iters):
        im = paddle.to_tensor(im)
        logits = model(im)
        pred = paddle.argmax(logits[0], axis=1)
        pred = pred.numpy().astype('float32')
        pred = np.squeeze(pred)
        for info in im_info[::-1]:
            if info[0] == 'resize':
                h, w = info[1][0], info[1][1]
                pred = cv2.resize(pred, (w, h), interpolation=cv2.INTER_NEAREST)
            elif info[0] == 'padding':
                h, w = info[1][0], info[1][1]
                pred = pred[0:h, 0:w]
            else:
                raise ValueError("Unexpected info '{}' in im_info".format(
                    info[0]))
        pred = pred[np.newaxis, :, :, np.newaxis]
        pred = pred.astype('int64')
        mask = label != eval_dataset.ignore_index
        # TODO: test execution time
        conf_mat.calculate(pred=pred, label=label, mask=mask)
        _, iou = conf_mat.mean_iou()

        time_iter = timer.elapsed_time()
        remain_iter = total_iters - iter - 1
        logger.debug(
            "[EVAL] iter_id={}, iter={}/{}, IoU={:4f}, sec/iter={:.4f} | ETA {}"
            .format(iter_id, iter + 1, total_iters, iou, time_iter,
                    calculate_eta(remain_iter, time_iter)))
        timer.restart()

    category_iou, miou = conf_mat.mean_iou()
    category_acc, acc = conf_mat.accuracy()
    logger.info(
        "[EVAL] #Images={} mIoU={:.4f} Acc={:.4f} Kappa={:.4f} ".format(
            len(eval_dataset), miou, acc, conf_mat.kappa()))
    logger.info("[EVAL] Category IoU: \n" + str(np.round(category_iou, 4)))
    logger.info("[EVAL] Category Acc: \n" + str(np.round(category_acc, 4)))
    return miou, acc
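
Note: ConfusionMatrix above is the legacy streaming API. A minimal streaming confusion matrix for reference; a sketch, not the original class.

# Minimal streaming confusion matrix (illustrative re-implementation).
import numpy as np

class StreamingConfusionMatrix:
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = np.zeros((num_classes, num_classes), dtype=np.int64)

    def calculate(self, pred, label, mask):
        keep = np.asarray(mask).reshape(-1)
        pred = np.asarray(pred).reshape(-1)[keep]
        label = np.asarray(label).reshape(-1)[keep]
        idx = label.astype(np.int64) * self.num_classes + pred
        self.mat += np.bincount(
            idx, minlength=self.num_classes**2).reshape(self.mat.shape)

    def mean_iou(self):
        inter = np.diag(self.mat)
        union = self.mat.sum(0) + self.mat.sum(1) - inter
        iou = inter / np.maximum(union, 1)
        return iou, float(iou.mean())
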
Example #30
def main(args):
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'

    paddle.set_device(place)
    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = val_dataset.transforms
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    config_check(cfg, val_dataset=val_dataset)

    predict(model,
            model_path=args.model_path,
            transforms=transforms,
            thing_list=val_dataset.thing_list,
            label_divisor=val_dataset.label_divisor,
            stuff_area=val_dataset.stuff_area,
            ignore_index=val_dataset.ignore_index,
            image_list=image_list,
            image_dir=image_dir,
            save_dir=args.save_dir,
            threshold=args.threshold,
            nms_kernel=args.nms_kernel,
            top_k=args.top_k)