Code example #1
0
File: train.py  Project: zhanzy178/detector
def train_detector(cfg):
    """Build a detector from *cfg*, wire up data loaders and training hooks,
    and run the train/val loop for ``cfg.epoch`` epochs.

    Relies on project helpers (``build_detector``, ``build_dataset``,
    ``Runner``, ``batch_processor``, ``load_checkpoint``) being in scope;
    the model is moved to the default CUDA device for the whole run.
    """
    # Model on GPU.
    model = build_detector(cfg.detector)
    model.cuda()

    # One loader per split, both using the configured image batch size.
    loaders = [
        DataLoader(build_dataset(split), batch_size=cfg.img_batch_size)
        for split in (cfg.dataset.train, cfg.dataset.val)
    ]

    # The runner drives the epoch loop; hooks cover LR scheduling, optimizer
    # stepping, checkpointing and logging.
    runner = Runner(model, batch_processor, cfg.optimizer, cfg.work_dir)
    runner.register_training_hooks(lr_config=cfg.lr_hook_cfg,
                                   optimizer_config=cfg.optimizer_hook_cfg,
                                   checkpoint_config=cfg.checkpoint_hook_cfg,
                                   log_config=cfg.log_hooks_cfg)

    # Optionally warm-start from an existing checkpoint file.
    if cfg.load_from and os.path.exists(cfg.load_from):
        load_checkpoint(model, cfg.load_from, logger=runner.logger)
        runner.logger.info('Load checkpoint from %s...' % cfg.load_from)

    # One training pass followed by one validation pass per epoch.
    runner.run(loaders, [('train', 1), ('val', 1)], cfg.epoch)
Code example #2
0
def main():
    """Entry point: parse CLI args, set up the environment and root logger,
    build the detector and train/val datasets, then hand off to
    ``train_detector``."""
    args = parse_args()
    cfg = Config.fromfile(args.config)

    # Environment first: distribution, cudnn_benchmark, random seed, etc.
    env.set_env(cfg.env_config)

    # Logger must exist before anything else wants to log.
    logger = log.get_root_logger(cfg.log_level)
    # NOTE(review): the distributed flag is hard-coded to True in this log
    # line — confirm against the actual env configuration.
    logger.info('Distributed training: {}'.format(True))

    # Embed the package version and the full config text into every
    # checkpoint's metadata so runs stay reproducible.
    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta = dict(satdet_version=__version__,
                                          config=cfg.text)

    model = build_detector(cfg.model,
                           train_cfg=cfg.train_cfg,
                           test_cfg=cfg.test_cfg)

    datasets = [get_dataset(cfg.data.train), get_dataset(cfg.data.val)]
    train_detector(model, datasets, cfg, logger=logger)
Code example #3
0
from models import build_detector
from tools import inference_detector
from utils import load_checkpoint, Config

# Load the DOTA Faster R-CNN config; the pretrained-backbone path is cleared
# so no backbone weights are fetched — the full checkpoint below supplies
# everything.
cfg = Config.fromfile(
    '/satdetection/satdet/configs/faster_rcnn_x101_64x4d_fpn_1x_dota.py')
cfg.model.pretrained = None

# Build the detector in test-only mode and restore fine-tuned weights from a
# hard-coded checkpoint path.
model = build_detector(cfg.model, test_cfg=cfg.test_cfg)
_ = load_checkpoint(
    model,
    '/satdetection/satdet/trained_checkpoints/epoch1_8078_finetune0002.pth')

print('------The model has been loaded.-----')


def inference_single_func(img_path):
    """Run the module-level detector on a single image.

    Uses the globally loaded ``model`` and ``cfg`` on ``cuda:0`` and returns
    the raw result from ``inference_detector`` unchanged.
    """
    return inference_detector(model, img_path, cfg, device='cuda:0')
Code example #4
0
File: export_saved_model.py  Project: wavce/letsdet
        cfg.test.kernel = args.nms_kernel

    # NonMaxSuppressionWithQuality needs an explicit sub-type via --nms_type.
    if args.nms == "NonMaxSuppressionWithQuality":
        assert args.nms_type is not None, "When [--nms] is `NonMaxSuppressionWithQuality`, [--nms_type] is necessary."

    # sigma is the decay parameter used by the soft / matrix NMS variants.
    if args.nms in ["MatrixNonMaxSuppression", "SoftNonMaxSuppression"]:
        cfg.test.sigma = args.nms_sigma

    if args.nms == "NonMaxSuppressionWithQuality":
        cfg.test.nms_type = args.nms_type
        if args.nms_type in ["soft_nms", "matrix_nms"]:
            cfg.test.sigma = args.nms_sigma
else:
    # No CLI NMS overrides: take settings from the given config file instead.
    cfg.override(args.config)

# Build the inference-mode detector and call it once on a random uint8 image
# — presumably to create all model variables before restoring weights; the
# outputs are discarded. TODO confirm.
detector = build_detector(cfg.detector, return_loss=False, cfg=cfg)
images = tf.random.uniform(
    [1, cfg.train.input_size[0], cfg.train.input_size[1], 3])
images = tf.cast(images, tf.uint8)
detector(images)

# Restore weights: a Keras .h5 file directly, otherwise the newest
# tf.train.Checkpoint written by the trainer (which also stores the
# optimizer state, so one is rebuilt here to match the checkpoint layout).
if args.ckpt is not None and ".h5" in args.ckpt:
    detector.load_weights(args.ckpt)
else:
    optimizer = build_optimizer(**cfg.train.optimizer.as_dict())

    checkpoint = tf.train.Checkpoint(optimizer=optimizer, detector=detector)
    manager = tf.train.CheckpointManager(checkpoint=checkpoint,
                                         directory=cfg.train.checkpoint_dir,
                                         max_to_keep=10)
    latest_checkpoint = manager.latest_checkpoint
Code example #5
0
def test(cfg, checkpoint, result, eval):
    """Run inference over the VOC test split and/or evaluate saved results.

    If *result* does not exist, builds the detector, loads weights from a
    hard-coded checkpoint file (remapping parameter names from a foreign
    naming scheme), runs inference over ``cfg.dataset.test`` and writes one
    ``<class>.txt`` file per class in VOC detection format. If *eval* is
    truthy, computes per-class AP and mean AP with ``voc_eval``.

    Args:
        cfg: config with ``detector``, ``dataset.test`` and ``data_root``.
        checkpoint: checkpoint path — currently unused, see note below.
        result: directory to write/read per-class result files.
        eval: whether to run AP evaluation (NOTE(review): shadows the
            builtin ``eval``; renaming would change the call interface).
    """
    class_name = VOC_CLASS
    if not os.path.exists(result):
        # compute result and save to result path

        detector = build_detector(cfg.detector)
        detector.cuda()
        # assert os.path.exists(checkpoint)
        # load_checkpoint(detector, checkpoint)
        # NOTE(review): the `checkpoint` argument is ignored and weights come
        # from this hard-coded path instead — confirm this is intentional.
        state_dict = torch.load(
            '/home/zzy/Downloads/faster_rcnn_1_6_10021.pth')
        dstate_dict = detector.state_dict()

        # Maps this project's parameter names (keys) to the names used inside
        # the loaded foreign checkpoint (values).
        key_mapper = {
            'backbone.features.0.weight': 'RCNN_base.0.weight',
            'backbone.features.0.bias': 'RCNN_base.0.bias',
            'backbone.features.2.weight': 'RCNN_base.2.weight',
            'backbone.features.2.bias': 'RCNN_base.2.bias',
            'backbone.features.5.weight': 'RCNN_base.5.weight',
            'backbone.features.5.bias': 'RCNN_base.5.bias',
            'backbone.features.7.weight': 'RCNN_base.7.weight',
            'backbone.features.7.bias': 'RCNN_base.7.bias',
            'backbone.features.10.weight': 'RCNN_base.10.weight',
            'backbone.features.10.bias': 'RCNN_base.10.bias',
            'backbone.features.12.weight': 'RCNN_base.12.weight',
            'backbone.features.12.bias': 'RCNN_base.12.bias',
            'backbone.features.14.weight': 'RCNN_base.14.weight',
            'backbone.features.14.bias': 'RCNN_base.14.bias',
            'backbone.features.17.weight': 'RCNN_base.17.weight',
            'backbone.features.17.bias': 'RCNN_base.17.bias',
            'backbone.features.19.weight': 'RCNN_base.19.weight',
            'backbone.features.19.bias': 'RCNN_base.19.bias',
            'backbone.features.21.weight': 'RCNN_base.21.weight',
            'backbone.features.21.bias': 'RCNN_base.21.bias',
            'backbone.features.24.weight': 'RCNN_base.24.weight',
            'backbone.features.24.bias': 'RCNN_base.24.bias',
            'backbone.features.26.weight': 'RCNN_base.26.weight',
            'backbone.features.26.bias': 'RCNN_base.26.bias',
            'backbone.features.28.weight': 'RCNN_base.28.weight',
            'backbone.features.28.bias': 'RCNN_base.28.bias',
            'rpn_head.conv.weight': 'RCNN_rpn.RPN_Conv.weight',
            'rpn_head.conv.bias': 'RCNN_rpn.RPN_Conv.bias',
            'rpn_head.obj_cls.weight': 'RCNN_rpn.RPN_cls_score.weight',
            'rpn_head.obj_cls.bias': 'RCNN_rpn.RPN_cls_score.bias',
            'rpn_head.obj_reg.weight': 'RCNN_rpn.RPN_bbox_pred.weight',
            'rpn_head.obj_reg.bias': 'RCNN_rpn.RPN_bbox_pred.bias',
            'bbox_head.shared_layers.0.weight': 'RCNN_top.0.weight',
            'bbox_head.shared_layers.0.bias': 'RCNN_top.0.bias',
            'bbox_head.shared_layers.3.weight': 'RCNN_top.3.weight',
            'bbox_head.shared_layers.3.bias': 'RCNN_top.3.bias',
            'bbox_head.cls_fc.weight': 'RCNN_cls_score.weight',
            'bbox_head.cls_fc.bias': 'RCNN_cls_score.bias',
            'bbox_head.reg_fc.weight': 'RCNN_bbox_pred.weight',
            'bbox_head.reg_fc.bias': 'RCNN_bbox_pred.bias'
        }
        # Fill every destination parameter from the foreign checkpoint's
        # 'model' sub-dict; raises KeyError if any name is unmapped.
        for k in list(dstate_dict.keys()):
            dstate_dict[k] = state_dict['model'][key_mapper[k]]
        detector.load_state_dict(dstate_dict)

        dataset = build_dataset(cfg.dataset.test)
        dataloader = DataLoader(dataset)

        # Detections grouped per class name for the VOC-format dump below.
        cls_results = {name: [] for name in dataset.class_name}
        bar = progressbar.ProgressBar(len(dataloader))
        for batch_i, batch in enumerate(dataloader):
            with torch.no_grad():
                detector.eval()
                img, img_meta = batch['img'].cuda(), batch['img_meta']
                det_bboxes, det_labels = detector(img, img_meta)
            for b in range(img.size(0)):
                for bi, bbox in enumerate(det_bboxes[b]):
                    # Labels are 1-based (the -1 maps to class_name indices).
                    name = dataset.class_name[det_labels[b][bi] - 1]
                    # Convert (cx, cy, w, h, score) to corner coordinates;
                    # the +/-0.5 presumably adjusts for pixel-center
                    # conventions — TODO confirm against voc_eval.
                    cls_results[name].append(
                        dict(bbox=[
                            bbox[0] - bbox[2] / 2 + 0.5,
                            bbox[1] - bbox[3] / 2 + 0.5,
                            bbox[0] + bbox[2] / 2 - 0.5,
                            bbox[1] + bbox[3] / 2 - 0.5, bbox[-1]
                        ],
                             filename=img_meta['filename'][b]))
            bar.update()

        # save in voc format
        # One "<filename> <score> <x1> <y1> <x2> <y2>" line per detection.
        os.mkdir(result)
        for cls in cls_results:
            rs = cls_results[cls]
            with open(os.path.join(result, cls + '.txt'), 'wt') as f:
                for r in rs:
                    f.write('{:s} {:.8f} {:.8f} {:.8f} {:.8f} {:.8f}\n'.format(
                        r['filename'], r['bbox'][-1], r['bbox'][0],
                        r['bbox'][1], r['bbox'][2], r['bbox'][3]))

    if eval:
        # eval result
        # Per-class AP via the standard VOC evaluation script, then mean AP.
        aps = []
        imagesetfile = os.path.join(cfg.data_root, 'ImageSets/Main',
                                    'test.txt')
        annopath = os.path.join(cfg.data_root, 'Annotations', '{}.xml')
        for ci, cls in enumerate(class_name):
            rec, prec, ap = voc_eval(os.path.join(result, cls + '.txt'),
                                     annopath,
                                     imagesetfile,
                                     cls,
                                     os.path.join(result, '.cache'),
                                     use_07_metric=False)
            aps.append(ap)

            print('AP for {} = {:.4f}'.format(cls, ap))
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
Code example #6
0
File: single_gpu_trainer.py  Project: wavce/letsdet
    def __init__(self, cfg, logger):
        """Set up a single-GPU trainer: datasets, detector, LR schedule,
        optimizer (loss-scaled under mixed precision), checkpoint restore,
        summary writer and loss/AP metrics.

        Args:
            cfg: project config object (attribute-style access; sub-configs
                expose ``as_dict()``).
            logger: logger used for progress and restore messages.
        """
        self.logger = logger

        # Mixed precision is keyed off the configured dtype string.
        use_mixed_precision = cfg.dtype in ["float16", "FP16"]
        if use_mixed_precision:
            tf.keras.mixed_precision.set_global_policy("mixed_float16")
            print("Using mixed precision training.")

        # Input pipelines; samples are produced as float16 under mixed
        # precision, float32 otherwise.
        self.train_dataset = build_dataset(
            dtype=tf.float16 if use_mixed_precision else tf.float32,
            **cfg.train.dataset.as_dict())
        self.val_dataset = build_dataset(
            dtype=tf.float16 if use_mixed_precision else tf.float32,
            **cfg.val.dataset.as_dict())

        # Detectors with a proposal layer (two-stage) get its config too.
        if cfg.train.get("proposal_layer"):
            self.detector = build_detector(
                cfg.detector, cfg=cfg, proposal_cfg=cfg.train.proposal_layer)
        else:
            self.detector = build_detector(cfg.detector, cfg=cfg)

        self.detector.load_pretrained_weights(
            cfg.train.pretrained_weights_path)

        # Derive the total step budget for the LR schedule from dataset size.
        train_steps_per_epoch = cfg.train.dataset.num_samples // cfg.train.dataset.batch_size
        self.total_train_steps = cfg.train.scheduler.train_epochs * train_steps_per_epoch
        self.warmup_steps = cfg.train.scheduler.warmup.steps
        self.warmup_learning_rate = cfg.train.scheduler.warmup.warmup_learning_rate
        self.learning_rate_scheduler = build_learning_rate_scheduler(
            **cfg.train.scheduler.learning_rate_scheduler.as_dict(),
            train_steps=self.total_train_steps,
            warmup_steps=self.warmup_steps,
            train_steps_per_epoch=train_steps_per_epoch)

        optimizer = build_optimizer(learning_rate=self.learning_rate_scheduler,
                                    **cfg.train.optimizer.as_dict())

        # Dynamic loss scaling guards float16 gradients against underflow.
        if use_mixed_precision:
            optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
                optimizer, dynamic=True)
            self.logger.info("Using mixed precision training.")

        self.optimizer = optimizer
        self.use_mixed_precision = use_mixed_precision
        self.cfg = cfg

        # Step counters are tf.Variables so they are saved in checkpoints.
        self.global_step = tf.Variable(initial_value=0,
                                       trainable=False,
                                       name="global_step",
                                       dtype=tf.int64)

        self.val_steps = tf.Variable(0,
                                     trainable=False,
                                     name="val_steps",
                                     dtype=tf.int64)

        self.checkpoint = tf.train.Checkpoint(optimizer=self.optimizer,
                                              detector=self.detector.detector,
                                              global_step=self.global_step,
                                              val_steps=self.val_steps)
        self.manager = tf.train.CheckpointManager(
            checkpoint=self.checkpoint,
            directory=cfg.train.checkpoint_dir,
            max_to_keep=10)

        # Resume from the newest checkpoint when one exists; note that the
        # epoch counter is NOT recovered from the checkpoint name.
        self.epochs = 0
        latest_checkpoint = self.manager.latest_checkpoint
        if latest_checkpoint is not None:
            self.checkpoint.restore(latest_checkpoint)
            self.logger.info("Restored weights from %s.", latest_checkpoint)
        else:
            self.global_step.assign(0)

        self.summary_writer = tf.summary.create_file_writer(
            logdir=cfg.train.summary_dir)
        self.log_every_n_steps = cfg.train.log_every_n_steps
        self.use_jit = tf.config.optimizer.get_jit() is not None

        # Running means of train/val losses, reset between log flushes.
        self.train_loss_metrics = {
            "l2_loss": tf.keras.metrics.Mean(),
            "loss": tf.keras.metrics.Mean()
        }
        self.val_loss_metrics = {
            "l2_loss": tf.keras.metrics.Mean(),
            "loss": tf.keras.metrics.Mean()
        }
        self._add_graph = True
        # Fix: the original assigned ap_metric twice (None, then immediately
        # the mAP object); keep only the effective assignment.
        self.ap_metric = metrics.mAP(self.cfg.num_classes)