Example #1
def main():
    args = parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method='env://')
        synchronize()

    cfg.merge_from_file(args.config)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = ''
    logger = setup_logger('ssd', save_dir, get_rank())
    logger.info(f'Using {num_gpus} GPUs.')

    logger.info(f'Called with args:\n{args}')
    logger.info(f'Running with config:\n{cfg}')

    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)

    output_dir = cfg.OUTPUT_DIR
    checkpointer = Checkpointer(model, save_dir=output_dir)
    ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt
    _ = checkpointer.load(ckpt, use_latest=args.ckpt is None)

    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
            output_folders[idx] = output_folder

    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(
            output_folders, dataset_names, data_loaders_val):
        inference(
            cfg,
            model,
            data_loader_val,
            dataset_name=dataset_name,
            device=device,
            output_dir=output_folder,
        )
        synchronize()
Example #2
def train(cfg, args):
    # Factory-style logger setup; nothing to worry about here for now
    logger = logging.getLogger('SSD.trainer')
    # Build the detection model
    model = build_detection_model(cfg)
    # Pick the device and move the model onto it
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)

    # Set up the learning rate, optimizer and LR milestones; roughly an annealing schedule: larger steps early on, smaller ones later
    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    # **** Resume training from a checkpoint here ****
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR, save_to_disk, logger)
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    # Important: the dataset is loaded through torch-style data loaders
    # The key part is the data loading; the model construction can be treated as a black box
    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg, is_train=True, distributed=args.distributed, max_iter=max_iter, start_iter=arguments['iteration'])

    # Start training for real? Not yet.
    # Even without training, the data still has to load, so skip training for now and look at the data loading path
    # model = do_train(cfg, model, train_loader, optimizer, scheduler, checkpointer, device, arguments, args)
    return model
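The "scale by num gpus" line above is the usual linear-scaling rule: the base LR is multiplied by the GPU count while the schedule milestones and the iteration budget are divided by it. A small worked example with made-up numbers (not taken from any config):

num_gpus = 4
base_lr, lr_steps, max_iter = 1e-3, [80000, 100000], 120000
lr = base_lr * num_gpus                         # 0.004
milestones = [s // num_gpus for s in lr_steps]  # [20000, 25000]
print(lr, milestones, max_iter // num_gpus)     # 0.004 [20000, 25000] 30000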
Example #3
def evaluation(cfg, ckpt, distributed):
    logger = logging.getLogger("SSD.inference")

    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    model.eval()

    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        distributed=distributed)
    for dataset_name, data_loader in zip(cfg.DATASETS.TEST, data_loaders_val):
        res = []

        for batch in data_loader:
            images, targets, image_ids = batch
            with torch.no_grad():
                torch.cuda.synchronize(device)
                start = time.time()
                outputs = model(images.to(device))
                torch.cuda.synchronize(device)
                end = time.time()
                res.append(end - start)
        time_sum = sum(res)

        print("FPS: %f" % (float(len(res) * cfg.TEST.BATCH_SIZE) / time_sum))
Example #4
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    # dummy_input = torch.randn(1, 3, 300, 300, device='cuda')
    # input_names = ["input"]
    # output_names = ["output"]
    # torch.onnx.export(model, dummy_input, "vgg_ssd300_voc.onnx", verbose=True, input_names=input_names, output_names=output_names)

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        drawn_image = draw_boxes(image, boxes, labels, scores,
                                 class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))
Example #5
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    parser = argparse.ArgumentParser(description='SSD FLOPs')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--in_size",
        default=300,
        help="input size",
        type=int,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    Model = build_detection_model(cfg).backbone
    summary(Model, torch.rand((1, 3, args.in_size, args.in_size)))
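Note that only the backbone is profiled (`.backbone` drops the detection head). As a cheap cross-check that needs no extra dependency, the same sub-module's parameter count can be printed directly; a minimal sketch:

n_params = sum(p.numel() for p in Model.parameters())
print('%.2fM backbone parameters' % (n_params / 1e6))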
Example #6
def train(cfg, args):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg,
                                    is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
Example #7
def Net(model_path):
    cfg.merge_from_file('configs/efficient_net_b3_ssd300_voc0712.yaml')
    model = build_detection_model(cfg)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['model']
    model.load_state_dict(state_dict)
    #model.eval()
    return model
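A possible use of this helper; the checkpoint path below is hypothetical, and eval() is left to the caller, as the commented-out line above suggests:

model = Net('outputs/efficient_net_b3_ssd300_voc0712/model_final.pth')  # hypothetical path
model.eval()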
Example #8
def creat_model(cfg, ckpt):
    device = torch.device(cfg.MODEL.DEVICE)
    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))
    return model
Example #9
def evaluation(cfg, ckpt, distributed):
    logger = logging.getLogger('SSD.inference')

    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    do_evaluation(cfg, model, distributed)
Example #10
def train(cfg, args):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    extra_checkpoint_data = checkpointer.load()
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg,
                                    is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # macs, params = profile(model, inputs=(input, ))
    #
    # macs, params = clever_format([flops, params], "%.3f")

    # net = model.to()
    # with torch.cuda.device(0):

    # net = model.to(device)
    # macs, params = get_model_complexity_info(net, (3, 512, 512), as_strings=True,
    #                                        print_per_layer_stat=True, verbose=True)
    # print('{:<30}  {:<8}'.format('Computational complexity: ', macs))
    # print('{:<30}  {:<8}'.format('Number of parameters: ', params))

    n_params = sum(p.numel() for name, p in model.named_parameters()
                   if p.requires_grad)
    print(n_params)
    #
    # model = net
    # inputs = torch.randn(1, 3, 300, 300) #8618 305
    # inputs = torch.randn(1, 3, 300, 300)

    # macs = profile_macs(model, inputs)
    # print(macs)

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
Example #11
def evaluation(cfg, ckpt, distributed):
    logger = logging.getLogger("SSD.inference")

    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    #model.load_state_dict(torch.load('outputs/vgg_ssd300_voc0712.pth'), strict=False)
    model.to(device)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    do_evaluation(cfg, model, distributed)
Example #12
def evaluation(cfg, ckpt, distributed):
    logger: logging.Logger = logging.getLogger("SSD.inference")

    model = build_detection_model(cfg)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    checkpointer.load(ckpt, use_latest=ckpt is None)

    for scale in np.linspace(0.5, 1.0, 5):
        logger.info(f"Running eval with rescale factor: {scale}")
        eval_result = do_evaluation(cfg, model, distributed, rescale=scale)
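For reference, the sweep evaluates the model at five input scales:

print(np.linspace(0.5, 1.0, 5))  # [0.5   0.625 0.75  0.875 1.   ]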
Example #13
def train(cfg, local_rank, distributed):
    model = build_detection_model(cfg)
    device = torch.device(cfg.DEVICE)
    model.to(device)

    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)

    use_mixed_precision = cfg.DTYPE == 'float16'
    amp_opt_level = 'O1' if use_mixed_precision else 'O0'
    model, optimizer = amp.initialize(model,
                                      optimizer,
                                      opt_level=amp_opt_level)

    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model,
            device_ids=[local_rank],
            output_device=local_rank,
            broadcast_buffers=False)

    arguments = {}
    arguments['iteration'] = 0

    output_dir = cfg.OUTPUT_DIR

    save_to_disk = get_rank() == 0

    checkpointer = Checkpointer(model, optimizer, scheduler, output_dir,
                                save_to_disk)
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT)
    arguments.update(extra_checkpoint_data)

    data_loader = make_data_loader(cfg,
                                   is_train=True,
                                   is_distributed=distributed,
                                   start_iter=arguments['iteration'])

    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD

    do_train(
        model,
        data_loader,
        optimizer,
        scheduler,
        checkpointer,
        device,
        checkpoint_period,
        arguments,
    )

    return model
Example #14
def main():
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    parser = argparse.ArgumentParser(description='SSD WEIGHTS')
    parser.add_argument(
        "--config-file",
        default="",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument(
        "--ckpt",
        default='model_final.pth',
        type=str,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # cfg.freeze()
    cfg.MODEL.BACKBONE.PRETRAINED = False
    name = cfg.OUTPUT_DIR.split('/')[1]
    model_path = '/home/xpt/SSD-e/outputs/' + name + '/' + args.ckpt
    np.set_printoptions(threshold=sys.maxsize)  # print everything, no ellipsis
    np.set_printoptions(suppress=True)  # plain decimals, no scientific notation
    state = torch.load(model_path, map_location=torch.device('cpu'))  # print(state['model'])
    file = open('weights/' + name + '_para.txt', 'w')
    model = state['model']

    if cfg.TEST.BN_FUSE is True:
        print('BN_FUSE.')
        Model = build_detection_model(cfg)
        # print(Model)
        Model.load_state_dict(model)
        Model.backbone.bn_fuse()
        model=Model.state_dict()
    for name in model:
        print(name)
        para = model[name]
        print(para.shape)
        file.write(str(name) + ':\n')
        file.write('shape:' + str(para.shape) + '\n')
        file.write('para:\n' + str(para.cpu().data.numpy()) + '\n')
    file.close()
Example #15
def evaluation(cfg, ckpt, distributed, model_path=None):
    logger = logging.getLogger("SSD.inference")
    model = build_detection_model(cfg)
    logger.info("Model :\n{}".format(model))  #如果用print,多gpu会打印两便
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR, logger=logger)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if model_path is None:
        checkpointer.load(ckpt, use_latest=ckpt is None)
    else:
        model.load_state_dict(torch.load(model_path))
    if cfg.TEST.BN_FUSE is True:
        print('BN_FUSE.')
        model.backbone.bn_fuse()
        model.to(device)
    do_evaluation(cfg, model, distributed)
Example #16
def main(video, config):
    class_name = ('__background__', 'lubang', 'retak aligator',
                  'retak melintang', 'retak memanjang')

    cfg.merge_from_file(config)
    cfg.freeze()

    ckpt = None
    device = torch.device(cfg.MODEL.DEVICE)
    model = build_detection_model(cfg)
    model.to(device)

    checkpoint = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpoint.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpoint.get_checkpoint_file()
    print(f'Loading weight from {weight_file}')
Example #17
 def __init__(self, _device="cuda", _cfg = cfg, _ckpt = ssd_model, _threshold = 0.75):
     print("Init detector")
     self.isReady = False
     self.dataset_type = "voc"
     self.class_names = VOCDataset.class_names
     self.device = _device
     self.config_file = ssd_config
     self.cfg = _cfg
     self.cfg.merge_from_file(self.config_file)
     self.cfg.freeze()
     self.model = build_detection_model(self.cfg)
     self.model = self.model.to(self.device)
     self.ckpt = _ckpt
     self.checkpointer = CheckPointer(self.model, save_dir=self.cfg.OUTPUT_DIR)
     self.checkpointer.load(self.ckpt, use_latest=self.ckpt is None)
     self.weight_file = self.ckpt if self.ckpt else self.checkpointer.get_checkpoint_file()
     self.threshold = _threshold
Example #18
def train(cfg, args):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    extra_checkpoint_data = checkpointer.load(args.ckpt)
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg,
                                    is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    logging.info('==>Start statistic')
    do_run(cfg, model, distributed=args.distributed)
    logging.info('==>End statistic')

    for ops in model.modules():
        if isinstance(ops, torch.nn.ReLU):
            ops.collectStats = False

            #            ops.c.data = ops.running_mean + (ops.running_b * laplace[args.actBitwidth])
            ops.c.data = ops.running_mean + (3 * ops.running_std)
            ops.quant = True
    torch.cuda.empty_cache()
    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)
    return model
Example #19
def train(cfg: CfgNode,
          args: Namespace,
          output_dir: Path,
          model_manager: Dict[str, Any],
          freeze_non_sigma: bool = False):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR,
                                save_to_disk, logger)
    resume_from = checkpointer.get_best_from_experiment_dir(cfg)
    extra_checkpoint_data = checkpointer.load(f=resume_from)
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg,
                                    is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])

    # Weight freezing test:
    # print_model(model)
    # freeze_weights(model)
    print_model(model)

    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args, output_dir,
                     model_manager)
    return model
Example #20
File: train.py  Project: touchylk/lwk_SSD
def train(cfg, args):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)  # build the model
    device = torch.device(cfg.MODEL.DEVICE)  # note how cfg is organized: the config file is kept separate from args
    model.to(device)
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank)
        # model = nn.DataParallel(model)

    lr = cfg.SOLVER.LR * args.num_gpus  # scale by num gpus
    optimizer = make_optimizer(cfg, model, lr)  # build the optimizer

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(model,
                                optimizer,
                                scheduler,
                                save_dir=cfg.OUTPUT_DIR,
                                save_to_disk=save_to_disk,
                                logger=logger)
    # Build the checkpoint save/load helper; save_dir tells it where checkpoints are stored
    extra_checkpoint_data = checkpointer.load(f='', use_latest=False)  # load the model
    arguments.update(extra_checkpoint_data)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
    train_loader = make_data_loader(cfg,
                                    is_train=True,
                                    distributed=args.distributed,
                                    max_iter=max_iter,
                                    start_iter=arguments['iteration'])  # build the data loader

    print("dataloader: ", train_loader.batch_size)
    # exit(1232)
    model = do_train(cfg, model, train_loader, optimizer, scheduler,
                     checkpointer, device, arguments, args)  # train
    return model
Example #21
def get_model_info(args, cfg):
    print(cfg)
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    # Magic number. Just plugged in a value to satisfy the error message:
    if cfg.INPUT.IMAGE_SIZE == 512:
        NUM_PRIORS = 24564
    elif cfg.INPUT.IMAGE_SIZE == 300:
        NUM_PRIORS = 10830
    else:
        raise "No implemented"
    for show_input in [False, True]:
        summary(
            model,
            torch.zeros((1, 3, cfg.INPUT.IMAGE_SIZE, cfg.INPUT.IMAGE_SIZE)).to(device),
            {
                "labels": torch.ones((1, NUM_PRIORS)).type(torch.LongTensor).to(device),
                "boxes": torch.ones((1, NUM_PRIORS, 4)).type(torch.FloatTensor).to(device),
            },
            show_input=show_input
        )
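Instead of hard-coding NUM_PRIORS, the count could in principle be read from the prior/anchor generator itself. The two lines below are a sketch that assumes lufficc/SSD's PriorBox API; they are left commented out because the exact import path may differ between versions:

# from ssd.modeling.anchors.prior_box import PriorBox  # assumed module path
# NUM_PRIORS = PriorBox(cfg)().shape[0]                # priors tensor is (num_priors, 4)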
Example #22
    def update_config(self,
                      config_file,
                      dataset_type,
                      weight,
                      score_threshold=0.5,
                      targets=["person"]):
        if dataset_type == "voc":
            self.class_names = VOCDataset.class_names
        elif dataset_type == "coco":
            self.class_names = COCODataset.class_names
        else:
            raise NotImplementedError('Not implemented now.')
        self.target_labels = []
        if targets is None:
            self.target_labels = [i for i in range(len(self.class_names))]
        else:
            for idx in range(len(self.class_names)):
                if self.class_names[idx] in targets:
                    self.target_labels.append(idx)
        self.cfg = cfg
        self.cfg.merge_from_file(config_file)
        self.cfg.freeze()
        print("Loaded configuration file {}".format(config_file))
        with open(config_file, "r") as cf:
            config_str = "\n" + cf.read()
#            print(config_str)
#        print("Running SSD with config:\n{}".format(self.cfg))

        self.device = torch.device(self.cfg.MODEL.DEVICE)
        self.model = build_detection_model(self.cfg)
        self.model = self.model.to(self.device)
        self.checkpointer = CheckPointer(self.model)
        self.checkpointer.load(weight)
        self.cpu_device = torch.device("cpu")
        self.score_threshold = score_threshold
        self.transforms = build_transforms(self.cfg, is_train=False)
        self.model.eval()
Example #23
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir):
    class_names = VOCDataset.class_names
    device = torch.device(cfg.MODEL.DEVICE)
    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.bmp'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        text = ['__background__']
        resDic = {}
        for j in range(len(boxes)):
            xmin = int(boxes[j, 0])
            ymin = int(boxes[j, 1])
            xmax = int(boxes[j, 2])
            ymax = int(boxes[j, 3])

            if labels[j] == 1:
                xmin += 140
                xmax -= 130
            elif labels[j] == 2:
                xmin += 130
            elif labels[j] == 4:
                xmin += 40

            box_height = ymax - ymin
            box_width = xmax - xmin

            cropImg = image[ymin:ymin + box_height, xmin:xmin + box_width]
            cropImg = local_threshold(cropImg)

            boxes[j, 0] = xmin
            boxes[j, 1] = ymin
            boxes[j, 2] = xmax
            boxes[j, 3] = ymax
            text_tmp = crnnOcr(Image.fromarray(cropImg))

            if labels[j] == 2:
                text_tmp = re.sub('[^\x00-\xff]', '/', text_tmp)

            text.append(text_tmp)
            resDic[class_names[labels[j]]] = text_tmp

        result = json.dumps(resDic, ensure_ascii=False)
        print(result)
Example #24
 def get_model(self, cfg, weightfile):
     model = build_detection_model(cfg)
     model = model.to(self.device)
     checkpointer = CheckPointer(model)
     checkpointer.load(weightfile, use_latest=weightfile is None)
     return model
Example #25
config = './configs/config.yaml'
image_input = cv2.imread('frame_75.jpg')
output_dir = './outputs/ssd_custom_coco_format'
result_file = './results/feature_maps_frame75.jpg'

class_name = (  # a tuple keeps the class order (a set would not)
    '__background__', 'lubang', 'retak aligator', 'retak melintang',
    'retak memanjang'
)

cfg.merge_from_file(config)
cfg.freeze()
ckpt = None
device = torch.device('cpu')
model = build_detection_model(cfg)
model.to(device)

checkpoint = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
checkpoint.load(ckpt, use_latest=ckpt is None)
weight_file = ckpt if ckpt else checkpoint.get_checkpoint_file()
transforms = build_transforms(cfg, is_train=False)
model.eval()

conv_layers = []

model_children = list(model.children())
print(len(model_children))
print(type(model_children[0]))
print(type(model_children[1]))
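The child-module listing above is only the first step toward dumping feature maps. Below is a minimal sketch of collecting every Conv2d output with standard PyTorch forward hooks; it reuses model, transforms, device and image_input from above and assumes image_input is the BGR array returned by cv2.imread:

feature_maps = []
hooks = []
for module in model.modules():
    if isinstance(module, torch.nn.Conv2d):
        # record this conv layer's output during the forward pass
        hooks.append(module.register_forward_hook(
            lambda m, inp, out: feature_maps.append(out.detach().cpu())))

rgb = cv2.cvtColor(image_input, cv2.COLOR_BGR2RGB)  # cv2 loads BGR; the transform expects RGB
tensor = transforms(rgb)[0].unsqueeze(0).to(device)
with torch.no_grad():
    model(tensor)  # detections are ignored; only the hooked activations are of interest

for h in hooks:
    h.remove()
print(len(feature_maps), feature_maps[0].shape)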
Example #26
def active_train(cfg, args):
    logger = logging.getLogger("SSD.trainer")
    raw_model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    raw_model.to(device)

    lr = cfg.SOLVER.LR * args.num_gpus
    optimizer = make_optimizer(cfg, raw_model, lr)

    milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
    scheduler = make_lr_scheduler(cfg, optimizer, milestones)

    arguments = {"iteration": 0}

    checkpointer = None
    save_to_disk = dist_util.get_rank() == 0
    checkpointer = CheckPointer(raw_model, optimizer, scheduler,
                                args.model_dir, save_to_disk, logger)

    max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus

    is_train = True
    train_transform = build_transforms(cfg, is_train=is_train)
    target_transform = build_target_transform(cfg) if is_train else None
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
    datasets = build_dataset(dataset_list,
                             transform=train_transform,
                             target_transform=target_transform,
                             is_train=is_train)

    logger.info(f'Creating query loader...')
    query_loader = QueryLoader(datasets[0], args, cfg)

    logger.info(f'Creating al model...')
    strategy = get_strategy(args.strategy)
    model = ALModel(raw_model, strategy, optimizer, device, scheduler,
                    arguments, args, checkpointer, cfg)

    logger.info(f'Training on initial data with size {args.init_size}...')
    n_bbox = query_loader.len_annotations()
    t1 = time.time()
    model.fit(query_loader.get_labeled_loader())
    init_time = time.time() - t1
    logger.info(f'Scoring after initial training...')
    score = model.score()
    logger.info(f'SCORE : {score:.4f}')

    fields = [
        args.strategy, {}, 0, score, init_time, 0, init_time,
        len(query_loader), n_bbox
    ]
    save_to_csv(args.filename, fields)

    for step in range(args.query_step):
        logger.info(f'STEP NUMBER {step}')
        logger.info('Querying assets to label')
        t1 = time.time()
        query_idx = model.query(
            unlabeled_loader=query_loader.get_unlabeled_loader(),
            cfg=cfg,
            args=args,
            step=step,
            n_instances=args.query_size,
            length_ds=len(datasets[0]))
        logger.info('Adding labeled samples to train dataset')
        query_loader.add_to_labeled(query_idx, step + 1)
        t2 = time.time()
        logger.info('Fitting with new data...')
        model.fit(query_loader.get_labeled_loader())
        total_time = time.time() - t1
        train_time = time.time() - t2
        active_time = total_time - train_time
        logger.info('Scoring model...')
        score = model.score()
        n_bbox = query_loader.len_annotations()
        fields = [
            args.strategy, {}, step + 1, score, train_time, active_time,
            total_time,
            len(query_loader), n_bbox
        ]
        save_to_csv(args.filename, fields)
        logger.info(f'SCORE : {score:.4f}')

    return model.model
Example #27
def main():
    st.title('Pavement Distress Detector')
    st.markdown(get_file_content_as_string('./introduction.md'))
    st.sidebar.markdown(get_file_content_as_string('./documentation.md'))
    caching.clear_cache()
    video = video_uploader('./input')
    config = config_uploader('./configs')
    output_dir = checkpoint_folder('./outputs')

    filename = f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{os.path.splitext(os.path.basename(config))[0]}"
    output_file = './results'
    #score_threshold = st.slider('Confidence Threshold', 0.0, 1.0, 0.5)
    #fps_threshold = st.slider('Counting Every (frames)', 10, 30, 20)
    score_threshold = 0.5
    fps_threshold = 20
    video_filename = f'{output_file}/{filename}.mp4'
    labels_filename = f'{output_file}/{filename}.txt'

    if st.button('Click here to run'):
        if (os.path.isdir(video) == False and os.path.isdir(config) == False
                and output_dir != './outputs/'):
            class_name = ('__background__', 'lubang', 'retak aligator',
                          'retak melintang', 'retak memanjang')

            cfg.merge_from_file(config)
            cfg.freeze()

            ckpt = None
            device = torch.device(cfg.MODEL.DEVICE)
            model = build_detection_model(cfg)
            model.to(device)

            checkpoint = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
            checkpoint.load(ckpt, use_latest=ckpt is None)
            weight_file = ckpt if ckpt else checkpoint.get_checkpoint_file()
            st.write(f'Loading weight from {weight_file}')
            cpu_device = torch.device('cpu')
            transforms = build_transforms(cfg, is_train=False)
            model.eval()

            clip = VideoFileClip(video)

            with tempfile.NamedTemporaryFile(
                    suffix='.avi'
            ) as temp:  #using temporary file because streamlit can't read opencv video result
                temp_name = temp.name
                pavement_distress(video, clip, fps_threshold, score_threshold,
                                  temp_name, labels_filename, transforms,
                                  model, device, cpu_device, class_name)

                result_clip = VideoFileClip(temp_name)
                st.write('Please wait, preparing result...')
                result_clip.write_videofile(video_filename)

            video_file = open(video_filename, 'rb')
            video_bytes = video_file.read()
            st.video(video_bytes)

        elif (os.path.isdir(video) == True and os.path.isdir(config) == False
              and output_dir != './outputs/'):
            st.warning('Please select video file')

        elif (os.path.isdir(video) == True and os.path.isdir(config) == True
              and output_dir != './outputs/'):
            st.warning('Please select video file and config file')

        elif (os.path.isdir(video) == False and os.path.isdir(config) == True
              and output_dir != './outputs/'):
            st.warning('Please select config file')

        elif (os.path.isdir(video) == True and os.path.isdir(config) == False
              and output_dir == './outputs/'):
            st.warning('Please select video file and checkpoint folder')

        elif (os.path.isdir(video) == False and os.path.isdir(config) == False
              and output_dir == './outputs/'):
            st.warning('Please select checkpoint folder')

        elif (os.path.isdir(video) == False and os.path.isdir(config) == True
              and output_dir == './outputs/'):
            st.warning('Please select config file and checkpoint folder')

        else:
            st.warning(
                'Please select video file, config file, and checkpoint folder')
Example #28
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, onnx_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    device = "cpu" if not torch.cuda.is_available() else device

    model = build_detection_model(cfg)
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    cpu_device = torch.device("cpu")
    transforms = build_transforms(cfg, is_train=False)
    model.eval()

    # get model ready for onnx export
    mkdir(onnx_dir)
    model_onnx = build_detection_model(cfg)
    model_onnx = model_onnx.to(device)
    checkpointer_onnx = CheckPointer(model_onnx, save_dir=cfg.OUTPUT_DIR)
    checkpointer_onnx.load(ckpt, use_latest=ckpt is None)
    # replace the SSD box head postprocessor with the onnx version for exporting
    model_onnx.box_head.post_processor = PostProcessorOnnx(cfg)
    model_onnx.eval()

    # export with ONNX
    # onnx modle takes the name of the pth ckpt file
    model_onnx_name = os.path.basename(ckpt).split('.')[0] + ".onnx"
    model_onnx_path = os.path.join(onnx_dir, model_onnx_name)
    if not os.path.exists(model_onnx_path):
        print(f'Exporting model as onnx to {model_onnx_path}')
        dummy_input = torch.zeros(
            [1, 3, cfg.INPUT.IMAGE_SIZE, cfg.INPUT.IMAGE_SIZE]).to(device)
        torch.onnx.export(model_onnx,
                          dummy_input,
                          model_onnx_path,
                          export_params=True,
                          do_constant_folding=True,
                          opset_version=11,
                          input_names=['input'],
                          output_names=['boxes', 'scores', 'labels'],
                          dynamic_axes={
                              'input': {0: 'batch_size', 2: "height", 3: "width"}},
                          verbose=False)

    # load exported onnx model for inference test
    print(
        f'Loading exported onnx model from {model_onnx_path} for inference comparison test')
    onnx_runtime_sess = onnxruntime.InferenceSession(model_onnx_path)

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)

        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start
        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result['scores']

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Pytorch: ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths), image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "pytorch_" + image_name))

        """
        Compute ONNX Runtime output prediction
        """

        start = time.time()
        ort_inputs = {onnx_runtime_sess.get_inputs()[0].name: np.array(images)}
        boxes, scores, labels = onnx_runtime_sess.run(None, ort_inputs)
        inference_time = time.time() - start

        indices = scores > score_threshold
        boxes, labels, scores = boxes[indices], labels[indices], scores[indices]
        # resize bounding boxes to size of the original image
        boxes[:, 0::2] *= (width)
        boxes[:, 1::2] *= (height)
        meters = ' | '.join(
            [
                'objects {:02d}'.format(len(boxes)),
                'load {:03d}ms'.format(round(load_time * 1000)),
                'inference {:03d}ms'.format(round(inference_time * 1000)),
                'FPS {}'.format(round(1.0 / inference_time))
            ]
        )
        print('Onnx:    ({:04d}/{:04d}) {}: {}'.format(i + 1,
                                                       len(image_paths),
                                                       image_name, meters))
        drawn_image = draw_boxes(image, boxes, labels,
                                 scores, class_names).astype(np.uint8)
        Image.fromarray(drawn_image).save(
            os.path.join(output_dir, "onnx_" + image_name))
Example #29
def train(cfg):
    logger = logging.getLogger('SSD.trainer')
    model = build_detection_model(cfg)
    device = torch.device(cfg.MODEL.DEVICE)
    model.to(device)
    return model
Example #30
def run_demo(cfg, ckpt, score_threshold, images_dir, output_dir, dataset_type):
    if dataset_type == "voc":
        class_names = VOCDataset.class_names
    elif dataset_type == 'coco':
        class_names = COCODataset.class_names
    else:
        raise NotImplementedError('Not implemented now.')
    device = torch.device(cfg.MODEL.DEVICE)
    smoke_name_dic = ('__background__', '一次性快餐盒', '书籍纸张', '充电宝', '剩饭剩菜', '包',
                      '垃圾桶', '塑料器皿', '塑料玩具', '塑料衣架', '大骨头', '干电池', '快递纸袋',
                      '插头电线', '旧衣服', '易拉罐', '枕头', '果皮果肉', '毛绒玩具', '污损塑料',
                      '污损用纸', '洗护用品', '烟蒂', '牙签', '玻璃器皿', '砧板', '筷子', '纸盒纸箱',
                      '花盆', '茶叶渣', '菜帮菜叶', '蛋壳', '调料瓶', '软膏', '过期药物', '酒瓶',
                      '金属厨具', '金属器皿', '金属食品罐', '锅', '陶瓷器皿', '鞋', '食用油桶', '饮料瓶',
                      '鱼骨')

    model = build_detection_model(cfg)
    cpu_device = torch.device("cpu")
    model = model.to(device)
    checkpointer = CheckPointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.load(ckpt, use_latest=ckpt is None)
    weight_file = ckpt if ckpt else checkpointer.get_checkpoint_file()
    print('Loaded weights from {}'.format(weight_file))

    image_paths = glob.glob(os.path.join(images_dir, '*.jpg'))
    mkdir(output_dir)

    transforms = build_transforms(cfg, is_train=False)
    model.eval()
    miss = 0

    for i, image_path in enumerate(image_paths):
        start = time.time()
        image_name = os.path.basename(image_path)
        cv_image = cv2.imread(image_path)
        PIL_image = Image.open(image_path)

        image = np.array(Image.open(image_path).convert("RGB"))
        height, width = image.shape[:2]
        images = transforms(image)[0].unsqueeze(0)
        load_time = time.time() - start

        start = time.time()
        result = model(images.to(device))[0]
        inference_time = time.time() - start

        result = result.resize((width, height)).to(cpu_device).numpy()
        boxes, labels, scores = result['boxes'], result['labels'], result[
            'scores']

        indices = scores > score_threshold
        boxes = boxes[indices]
        labels = labels[indices]
        scores = scores[indices]

        miss = miss + (1 - len(boxes))
        meters = ' | '.join([
            'objects {:02d}'.format(len(boxes)),
            'load {:03d}ms'.format(round(load_time * 1000)),
            'inference {:03d}ms'.format(round(inference_time * 1000)),
            'FPS {}'.format(round(1.0 / inference_time))
        ])
        print('({:04d}/{:04d}) {}: {}'.format(i + 1, len(image_paths),
                                              image_name, meters))

        draw_ = ImageDraw.Draw(PIL_image)
        for c in range(len(scores)):
            text = smoke_name_dic[labels[c]]
            font = ImageFont.truetype(
                '/usr/share/fonts/truetype/arphic/uming.ttc', 40)
            draw_.text((int(boxes[c][0]) + 2, int(boxes[c][1]) - 2),
                       text, (255, 0, 0),
                       font=font)

        cv_image = cv2.cvtColor(np.asarray(PIL_image), cv2.COLOR_RGB2BGR)
        for c in range(len(scores)):
            cv2.rectangle(cv_image, (int(boxes[c][0]), int(boxes[c][1])),
                          (int(boxes[c][2]), int(boxes[c][3])), (0, 0, 255), 4)
        cv2.imwrite(os.path.join(output_dir, image_name), cv_image)
    smoke_count = len(image_paths)
    print("images: %d  missed: %d  miss rate: %.2f" % (smoke_count, miss, miss / smoke_count))