Code example #1
def main():
    # switch to test mode
    config.MODE = 'test'
    extra()

    # create a logger
    logger = create_logger(config, 'test')

    # logging configurations
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # create a model
    os.environ["CUDA_VISIBLE_DEVICES"] = config.GPUS
    gpus = [int(i) for i in config.GPUS.split(',')]
    # after CUDA_VISIBLE_DEVICES is set, the visible devices are re-indexed from 0
    gpus = list(range(len(gpus)))

    model_rgb = create_model()
    model_rgb.my_load_state_dict(torch.load(config.TEST.STATE_DICT_RGB),
                                 strict=True)
    model_rgb = model_rgb.cuda(gpus[0])

    model_flow = create_model()
    model_flow.my_load_state_dict(torch.load(config.TEST.STATE_DICT_FLOW),
                                  strict=True)
    model_flow = model_flow.cuda(gpus[0])

    # load data
    test_dataset_rgb = get_dataset(mode='test', modality='rgb')
    test_dataset_flow = get_dataset(mode='test', modality='flow')

    test_loader_rgb = torch.utils.data.DataLoader(
        test_dataset_rgb,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)
    test_loader_flow = torch.utils.data.DataLoader(
        test_dataset_flow,
        batch_size=config.TEST.BATCH_SIZE * len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True)

    result_file_path = test_final(test_dataset_rgb, model_rgb,
                                  test_dataset_flow, model_flow)
    eval_mAP(config.DATASET.GT_JSON_PATH, result_file_path)
Code example #2
File: train.py Project: Spratumn/CircleDetect
def teacher_train(cfg, start_epoch):
    torch.manual_seed(cfg.SEED)
    device = torch.device('cuda' if cfg.GPU[0] >= 0 else 'cpu')
    if start_epoch == 1:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'w')
        train_log_title = "epoch,total_loss,hm_loss,wh_loss"
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'w')
        val_log_title = "epoch,precision,recall\n"
        if cfg.USE_OFFSET:
            train_log_title += ",offset_loss\n"
        else:
            train_log_title += "\n"
        train_log.write(train_log_title)
        train_log.flush()
        val_log.write(val_log_title)
        val_log.flush()
    else:
        train_log = open(os.path.join(cfg.LOG_DIR, "train_log.csv"), 'a')
        val_log = open(os.path.join(cfg.LOG_DIR, "val_log.csv"), 'a')

    print('Creating model...')
    teacher = create_model(cfg, 'res_18')
    teacher = load_model(teacher, 'log/weights/model_last_res.pth')
    model = create_model(cfg, 'litnet')
    if start_epoch != 1:
        model = load_model(
            model, 'log/weights/model_epoch_{}.pth'.format(start_epoch - 1))
    optimizer = torch.optim.Adam(model.parameters(), cfg.LR)

    trainer = TeacherTrainer(cfg, teacher, model, optimizer)
    trainer.set_device(cfg.GPU, device)
    print('Setting up data...')
    train_loader = DataLoader(TrainCircleDataset(cfg),
                              batch_size=cfg.BATCH_SIZE,
                              shuffle=True,
                              num_workers=cfg.NUM_WORKERS,
                              pin_memory=True,
                              drop_last=True)
    val_loader = ValCircleDataset()
    print('Starting training...')
    epoch = start_epoch
    for epoch in range(start_epoch, start_epoch + cfg.NUM_EPOCHS):
        trainer.train(epoch, train_loader, train_log)
        model_path = os.path.join(cfg.WEIGHTS_DIR,
                                  'model_epoch_{}.pth'.format(epoch))
        save_model(model_path, epoch, model, optimizer)
        trainer.val(epoch, model_path, val_loader, val_log, cfg)

    save_model(os.path.join(cfg.WEIGHTS_DIR, 'model_last.pth'), epoch, model,
               optimizer)
Code example #3
File: base_detector.py Project: XrosLiang/RTS3D
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model_image, self.model_point = create_model(opt)
        self.model_image = load_model(self.model_image, opt.load_model,
                                      'image_model')
        self.model_image = self.model_image.to(opt.device)
        self.model_image.eval()

        self.model_point = load_model(self.model_point, opt.load_model,
                                      'point_model')
        self.model_point = self.model_point.to(opt.device)
        self.model_point.eval()

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
        self.image_path = ' '
        self.max_objs = 32
        const = torch.Tensor([[-1, 0], [0, -1], [-1, 0], [0, -1], [-1, 0],
                              [0, -1], [-1, 0], [0, -1], [-1, 0], [0, -1],
                              [-1, 0], [0, -1], [-1, 0], [0, -1], [-1, 0],
                              [0, -1]])
        self.const = const.unsqueeze(0).unsqueeze(0)
        self.const = self.const.to(self.opt.device)
Code example #4
    def __init__(self, opt, frame_rate=30):             # significance of the frame_rate argument
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)           # load the model
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]         # tracks currently being tracked
        self.lost_stracks = []  # type: list[STrack]            # tracks that have been lost
        self.removed_stracks = []  # type: list[STrack]         # tracks that have been removed

        self.frame_id = 0
        self.det_thresh = opt.conf_thres                        # detection threshold, set equal to the tracking confidence threshold here
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)            # track buffer scaled by the actual frame rate of the input video
        self.max_time_lost = self.buffer_size                   # a track is declared lost after self.buffer_size consecutive frames without a match
        self.max_per_image = 128
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()         # predicts each target's position and velocity in the current frame from its previous-frame detection
Code example #5
def preloader():
    current_path = os.path.dirname(os.path.abspath(__file__))

    config_path = f'{current_path}/config/hrnet_plate.yaml'
    seg_weights = f'{current_path}/weights/hrnetv2_hrnet_plate_199.pth'
    output_dir = f'{current_path}/'
    data_name = 'plate'

    cfg = get_cfg_defaults()
    cfg.defrost()
    cfg.merge_from_file(config_path)
    cfg.merge_from_list([
        "train.config_path",
        config_path,
        "train.output_dir",
        output_dir,
        "dataset.data_name",
        data_name,
    ])
    print(
        torch.load(os.path.join(f"{current_path}/data", data_name + ".pth"),
                   map_location=torch.device('cpu')))

    model = create_model(cfg)
    # if torch.cuda.is_available():
    #     model.cuda()
    model = nn.DataParallel(model)
    print(torch.load(seg_weights, map_location=torch.device('cpu')).keys())
    model.load_state_dict(
        torch.load(seg_weights,
                   map_location=torch.device('cpu'))["state_dict"])
    model.eval()
    return model, cfg
Code example #6
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv,
                                  num_gnn_layers=opt.num_gnn_layers,
                                  gnn_type=opt.gnn_type,
                                  use_residual=opt.use_residual,
                                  return_pre_gnn_layer_outputs=opt.return_pre_gnn_layer_outputs,
                                  heads_share_params=opt.heads_share_params,
                                  omit_gnn=opt.omit_gnn,
                                  use_roi_align=opt.use_roi_align,
                                  viz_attention=opt.viz_attention)

        self.model = load_model(self.model, opt.load_model, distributed=True, copy_head_weights=False)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        # self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
        self.viz_attention = opt.viz_attention
Code example #7
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        # input = torch.randn(1, 3, 640, 640, requires_grad=True)
        # input=input.to(opt.device)
        # out = self.model(input)
        # torch.onnx.export(self.model,  # model being run
        #                   input,  # model input (or a tuple for multiple inputs)
        #                   "./test.onnx",  # where to save the model (can be a file or file-like object)
        #                   export_params=True,  # store the trained parameter weights inside the model file
        #                   opset_version=9
        #                   )

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Code example #8
def main(cfg):

    model = create_model('res_50', cfg.MODEL.HEAD_CONV, cfg).cuda()

    weight_path = '/home/tensorboy/data/centerpose/trained_best_model/res_50_best_model.pth'
    state_dict = torch.load(
        weight_path, map_location=lambda storage, loc: storage)['state_dict']
    model.load_state_dict(state_dict)

    onnx_file_path = "./model/resnet50.onnx"

    #img = cv2.imread('test_image.jpg')
    image = cv2.imread('../images/image1.jpg')
    images, meta = pre_process(image, cfg, scale=1)

    model.cuda()
    model.eval()
    model.float()
    torch_input = images.cuda()
    print(torch_input.shape)

    torch.onnx.export(model, torch_input, onnx_file_path, verbose=False)
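    # run the exported graph back through nxrun (presumably onnxruntime) as a quick sanity check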
    sess = nxrun.InferenceSession(onnx_file_path)

    input_name = sess.get_inputs()[0].name
    label_name = sess.get_outputs()[0].name

    print(input_name)
    print(sess.get_outputs()[0].name)
    print(sess.get_outputs()[1].name)
    print(sess.get_outputs()[2].name)
    output_onnx = sess.run(None, {input_name: images.cpu().data.numpy()})
    hm, wh, hps, reg, hm_hp, hp_offset = output_onnx
    print(hm)
    print(len(output_onnx))
Code example #9
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        # self.tb = SummaryWriter(
        #     log_dir='/home/wanghongwei/WorkSpace/source/detect/CenterNet/log/', comment='dla')
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  vis_graph=opt.vis_graph)
        try:
            self.model = load_model(self.model, opt.load_model)
            self.model = self.model.to(opt.device)
            self.model.eval()
        except Exception as e:
            print(e)
            print('maybe the model is None!!')
            raise e

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Code example #10
File: base_detector.py Project: dinglinghu/DRN2020
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  number_stacks=opt.number_stacks,
                                  fsm=opt.fsm,
                                  drmc=opt.drmc,
                                  drmr=opt.drmr,
                                  only_ls=opt.only_ls)
        self.model = load_model(self.model, opt.load_model, test=True)
        self.model = self.model.to(opt.device)
        self.model.eval()
        if opt.gene_guided_grad_cam:
            self.act_maps = []
            self.register_hooks()
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        # modified by condi
        # self.max_per_image = 100
        self.max_per_image = opt.K
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Code example #11
File: multitracker.py Project: hwfan/FairMOT
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
        self.roi_align = RoIAlign(7, 7)
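        # additionally build a YOLOv3 + DeepSORT detector from the hard-coded config paths below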
        cfg = get_config()
        cfg.merge_from_file(
            "/home/hongwei/track-human/FairMOT/src/lib/tracker/deep_configs/yolov3.yaml"
        )
        cfg.merge_from_file(
            "/home/hongwei/track-human/FairMOT/src/lib/tracker/deep_configs/deep_sort.yaml"
        )
        self.detector = build_detector(cfg, True)
Code example #12
def main(cfg):

    model = create_model('mobilenetv2', cfg.MODEL.HEAD_CONV, cfg).cuda()

    weight_path = '../models/centerface/mobilenetv2-large/model_best.pth'
    state_dict = torch.load(
        weight_path, map_location=lambda storage, loc: storage)['state_dict']
    model.load_state_dict(state_dict)

    onnx_file_path = "../models/centerface/mobilenetv2-large/mobile.onnx"

    #img = cv2.imread('test_image.jpg')
    image = cv2.imread('../images/image1.jpeg')
    images, meta = pre_process(image, cfg, scale=1)

    model.cuda()
    model.eval()
    model.float()
    torch_input = images.cuda()
    # print(torch_input.shape)
    print('save...')
    torch.onnx.export(model, torch_input, onnx_file_path, verbose=False)
    sess = nxrun.InferenceSession(onnx_file_path)

    print('save done')
    input_name = sess.get_inputs()[0].name
    output_onnx = sess.run(None, {input_name: images.cpu().data.numpy()})
Code example #13
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = 128
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Code example #14
    def __init__(self, options):
        if options.gpus[0] >= 0:
            try:
                self.ctx = mx.gpu()
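                # allocate a tiny array to confirm the GPU context is actually usable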
                _ = nd.zeros((1, ), ctx=self.ctx)
            except mx.base.MXNetError:
                print("No GPU available. Use CPU instead.")
                self.ctx = mx.cpu()
        else:
            self.ctx = mx.cpu()

        print("Creating model...")
        self.model = create_model(options.arch,
                                  options.heads,
                                  options.head_conv,
                                  ctx=self.ctx)
        if options.load_model_path != '':
            self.model = load_model(self.model,
                                    options.load_model_path,
                                    ctx=self.ctx)

        self.mean = np.array(options.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(options.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = options.num_classes
        self.scales = options.test_scales
        self.opt = options
        self.pause = True
Code example #15
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        # convert to onnx
        # input_names = ["input0"]
        # output_names = ["hm", "wh", "id", "reg"]
        # inputs = torch.randn(1, 3, 480, 640).to('cpu')
        # torch_out = torch.onnx._export(self.model, inputs, 'pruned.onnx', export_params=True, verbose=False,
        #                                input_names=input_names, output_names=output_names)
        # print("export onnx sucess")

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Code example #16
def test_hrnet():
    arch = 'hrnet'
    heads = {'hm': 80, 'wh': 2, 'reg': 2}
    head_conv = 64  # 64, 128, 256
    cfg = '/home/wanghongwei/WorkSpace/source/detect/CenterNet/src/lib/config/w32_256x192_adam_lr1e-3.yaml'
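    # dummy batch of three 3-channel 256x192 inputs for a forward-pass smoke test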
    image = torch.randn(3, 3, 256, 192)
    model = create_model(arch=arch, heads=heads, head_conv=head_conv, cfg=cfg)
    result = model(image)
    return result
Code example #17
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
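        # keep a separate checkpoint per epoch after epoch 100; before that, only model_last.pth is overwritten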
        if epoch > 100:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Code example #18
File: center_main.py Project: uCedar/ObjectDetection
    def __init__(self, opt):
        torch.manual_seed(opt.seed)
        torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
        Dataset = get_dataset(opt.dataset, opt.task)
        opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
        self.opt = opt
        print(opt)

        self.logger = Logger(opt)

        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
        opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

        print('Creating model...')
        model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = model

        optimizer = torch.optim.Adam(model.parameters(), opt.lr)
        self.optimizer = optimizer

        start_epoch = 0
        if opt.load_model != '':
            model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                       optimizer, opt.resume,
                                                       opt.lr, opt.lr_step)

        Trainer = train_factory[opt.task]
        trainer = Trainer(opt, model, optimizer)
        trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
        self.trainer = trainer

        print('Setting up data...')
        val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                                 batch_size=1,
                                                 shuffle=False,
                                                 num_workers=1,
                                                 pin_memory=True)
        self.val_loader = val_loader

        if opt.test:
            _, preds = trainer.val(0, val_loader)
            val_loader.dataset.run_eval(preds, opt.save_dir)
            return

        train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                                   batch_size=opt.batch_size,
                                                   shuffle=True,
                                                   num_workers=opt.num_workers,
                                                   pin_memory=True,
                                                   drop_last=True)
        self.train_loader = train_loader

        self.best = 1e10
Code example #19
 def __init__(self,
              model_path,
              conf_thres=0.4,
              model_name='dla_34',
              device='cpu'):
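     # heads assumed from the key names: center heatmap, box geometry, 128-d re-ID embedding, center offset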
     heads = {'hm': 1, 'wh': 4, 'id': 128, 'reg': 2}
     head_conv = 256
     self.device = device
     self.conf_thres = conf_thres
     self.model = create_model(model_name, heads, head_conv)
     self.model = load_model(self.model, model_path)
     self.model = self.model.to(device)
     self.model.eval()
Code example #20
File: check_models.py Project: shenmayufei/294-82
def get_model(opt, model_path):
    if opt.gpus[0] >= 0:
        opt.device = torch.device("cuda")
    else:
        opt.device = torch.device("cpu")

    print("Creating model...")
    # model_path = opt.load_model
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    checkpoint = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    print('Loaded {}, #epochs: {}'.format(model_path, checkpoint['epoch']))

    print(f"num_params: {count_parameters(model)}")
Code example #21
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.num_classes)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)

        self.num_classes = opt.num_classes
        self.opt = opt
        self.pause = True
Code example #22
File: base_detector.py Project: xiexu666/centerpose
    def __init__(self, cfg):
    
        print('Creating model...')
        self.model = create_model(cfg.MODEL.NAME, cfg.MODEL.HEAD_CONV, cfg)
        self.model = load_model(self.model, cfg.TEST.MODEL_PATH)
        self.model = self.model.to(torch.device('cuda'))
        self.model.eval()

        self.mean = np.array(cfg.DATASET.MEAN, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(cfg.DATASET.STD, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = cfg.MODEL.NUM_CLASSES
        self.scales = cfg.TEST.TEST_SCALES
        self.cfg = cfg
        self.pause = True
Code example #23
def demo(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    opt.device = torch.device('cuda')

    print('Loading images...')
    if os.path.isdir(opt.demo):
        image_names = []
        ls = os.listdir(opt.demo)
        for file_name in sorted(ls):
            ext = file_name[file_name.rfind('.') + 1:].lower()
            if ext in image_ext:
                image_names.append(os.path.join(opt.demo, file_name))
    else:
        image_names = [opt.demo]

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    model = model.to(opt.device)

    for image_name in image_names:
        image = cv2.imread(image_name)
        original_images = pre_process(image)
        original_images = original_images.to(opt.device)
        # start from a detached copy of the input that we can differentiate w.r.t.
        images = original_images.clone().detach().requires_grad_(True)
        for i in range(20):
            hm = model(images)[-1]['hm'].sigmoid_()
            scores = _topk(hm, K=1)
            loss = torch.sum(scores)
            if loss > 0:
                print(loss)
                model.zero_grad()
                loss.backward()
                grad = images.grad.data.sign()
                # gradient-sign step that suppresses the detection score,
                # re-attached as a leaf so it can be differentiated again next iteration
                images = (images - 0.4 * grad).clamp(-1, 1).detach().requires_grad_(True)
            else:
                break
        perturb = (images - original_images).detach().cpu().squeeze(0).numpy()
        perturb = perturb.transpose(1, 2, 0)
        perturb = perturb * np.array(opt.std, dtype=np.float32).reshape(
            1, 1, 3).astype(np.float32) * 255
        perturb = (perturb.astype(np.int16) + 128).clip(0, 255)
        # cv2.resize expects the target size as (width, height)
        perturb = cv2.resize(perturb, (image.shape[1], image.shape[0]))
        perturb = perturb - 128
        adv_image = (image + perturb).clip(0, 255).astype(np.uint8)
        cv2.imwrite(os.path.join('results', os.path.basename(image_name)), adv_image)
Code example #24
File: inference.py Project: Spratumn/CircleDetect
    def __init__(self, model_path, cfg):
        super(Detector, self).__init__()
        self.cfg = cfg
        print('Creating model...')
        self.model = create_model(self.cfg, 'res_18')
        self.model = load_model(self.model, model_path)
        self.model = self.model.to(self.cfg.DEVICE)
        self.model.eval()

        self.mean = np.array(self.cfg.DATA_MEAN,
                             dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(self.cfg.DATA_STD,
                            dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = cfg.K
        self.scales = self.cfg.TEST_SCALES
        self.pause = True
Code example #25
File: model_centernet.py Project: sueki743/ACE
  def __init__(self, sess, model_saved_path, labels_path, fix_ratio, **_):
    with open(labels_path) as f:
      self.labels = f.read().splitlines()
    self.image_shape = [600, 600, 3]  # TODO

    self.opt = opt = opts().parse('ctdet --dataset bdd --keep_res'.split())  # TODO: others than bdd
    Dataset = get_dataset(opt.dataset, opt.task)
    opts.update_dataset_info_and_set_heads(None, opt, Dataset)
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, model_saved_path)
    # TODO: loss
    # TODO: model_with_loss
    self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model = model.to(self.device)  # TODO: model_with_loss
    self.model.eval()  # TODO: model_with_loss
    self.fix_ratio = fix_ratio
Code example #26
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Code example #27
  def __init__(self, opt):
    if opt.gpus[0] >= 0:
      opt.device = torch.device('cuda')
    else:
      opt.device = torch.device('cpu')
    
    print('Creating model...')
    self.model = create_model(opt.arch, opt.heads, opt.head_conv)
    self.model = load_model(self.model, "/nfs4/ajaym/Downloads/cendeep_sort_pytorch-master/centernet/models/ctdet_coco_dla_2x.pth")
    self.model = self.model.to(opt.device)
    self.model.eval()

    self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
    self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
    self.max_per_image = 100
    self.num_classes = opt.num_classes
    self.scales = opt.test_scales
    self.opt = opt
    self.pause = True
Code example #28
File: base_detector.py Project: Shuai-Xie/CenterNet
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')  # can be multi gpu
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)  # load pretrain
        self.model = self.model.to(opt.device)
        self.model.eval()  # set BN, Dropout constant

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = opt.K  # 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Code example #29
    def __init__(self, opt):
        self.model = create_model(opt.arch,
                                  opt.heads,
                                  opt.head_conv,
                                  opt.num_layers,
                                  training=False,
                                  channel_last=opt.channel_last)
        if opt.checkpoint != '':
            extension = os.path.splitext(opt.checkpoint)[1]
            assert (
                extension == '.h5' or extension == ".protobuf"
            ), "incorrect file extension, should be either .h5 or .protobuf"
            load_model(self.model, opt.checkpoint, clear=True)

        self.mean = opt.mean
        self.std = opt.std
        self.max_per_image = 128
        self.opt = opt
        self.pause = True
Code example #30
def main(opt):
  torch.manual_seed(opt.seed)
  torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
  Dataset = get_dataset(opt.dataset, opt.task)
  opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
  print(opt)

  logger = Logger(opt)

  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
  opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')
  
  print('Creating model...')
  model = create_model(opt.arch, opt.heads, opt.head_conv)
  print(next(model.parameters()).device)
  model.to("cuda")
  summary(model, (3, 512, 512), device="cuda")

  computeTime(model, "cuda")