Example #1
def forward_pytorch(weight_file, image):
    # Define the model network and load the checkpoint weights
    heads = {'hm': 5, 'wh': 2, 'reg': 2, 'id': 128}
    net = create_model(arch='resdcn_18', heads=heads, head_conv=-1)
    net = load_model(model=net, model_path=weight_file)
    net.eval()

    if args.cuda:
        net.cuda()

    print(net)

    # Convert the input to a float32 tensor (Variable is deprecated)
    image = torch.from_numpy(image).float()
    if args.cuda:
        image = image.cuda()

    t0 = time.time()
    with torch.no_grad():
        blobs = net.forward(image)
    t1 = time.time()
    return t1 - t0, blobs, net.parameters()
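A minimal driver for the function above, as a hedged sketch: the weight path and the 1x3x608x1088 input shape are assumptions chosen to match the resdcn_18 head configuration used elsewhere on this page, not values from the example itself.

import numpy as np

# Hypothetical usage: feed a preprocessed NCHW float32 blob.
dummy = np.random.rand(1, 3, 608, 1088).astype(np.float32)
elapsed, blobs, params = forward_pytorch('mcmot_last_track_resdcn_18.pth', dummy)
print('forward pass took %.3fs' % elapsed)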
Example #2
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        # if opt.gpus[0] >= 0:
        #     opt.device = torch.device('cuda')
        # else:
        #     opt.device = torch.device('cpu')

        # ----- init model
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model,
                                opt.load_model)  # load specified checkpoint
        self.model = self.model.to(opt.device)
        self.model.eval()

        # ----- track_lets
        self.tracked_stracks_dict = defaultdict(
            list)  # value type: list[STrack]
        self.lost_stracks_dict = defaultdict(list)  # value type: list[STrack]
        self.removed_stracks_dict = defaultdict(
            list)  # value type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = self.opt.K  # max objects per image
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        # ----- using kalman filter to stabilize tracking
        self.kalman_filter = KalmanFilter()
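The snippet above shows only the __init__ method and does not name the enclosing class; in MCMOT this initializer belongs to the JDETracker class, so the sketch below assumes that name and the project's opts() parser.

# Hypothetical instantiation (class name assumed):
opt = opts().init()
tracker = JDETracker(opt, frame_rate=30)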
Example #3
    def __init__(self, opt, frame_rate=30):
        self.opt = opt
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.tracked_stracks = []  # type: list[STrack]
        self.lost_stracks = []  # type: list[STrack]
        self.removed_stracks = []  # type: list[STrack]

        self.frame_id = 0
        self.det_thresh = opt.conf_thres
        self.buffer_size = int(frame_rate / 30.0 * opt.track_buffer)
        self.max_time_lost = self.buffer_size
        self.max_per_image = opt.K
        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)

        self.kalman_filter = KalmanFilter()
Example #4
def prefetch_test(opt):
    Dataset = dataset_factory[opt.dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    # model = load_model(model, opt.load_model)
    model.eval()
    p2c = Pytorch2Caffe(model, '/home/lijf/MT', 'parking', [3, 512, 512])
    p2c.start()
Example #5
    def __init__(self, opt):

        print('Creating model...')
        self.model = create_model(
            opt.heads, opt.head_conv
        )  # heads e.g. {'hm': opt.num_classes, 'wh': 2, 'reg': 2}; head_conv = 256
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.cuda()
        self.model.eval()

        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.opt = opt
        transform_val_list = [
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ]
        self.trans_compose = transforms.Compose(transform_val_list)
        del transform_val_list
Example #6
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        print('Creating model...')
        self.model = create_model(opt.arch, opt.heads, opt.head_conv)
        self.model = load_model(self.model, opt.load_model)
        self.model = self.model.to(opt.device)
        self.model.eval()

        self.mean = np.array(opt.mean, dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array(opt.std, dtype=np.float32).reshape(1, 1, 3)
        self.max_per_image = 100
        self.num_classes = opt.num_classes
        self.scales = opt.test_scales
        self.opt = opt
        self.pause = True
Example #7
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    print('Setting up data...')
    Dataset = get_dataset(opt.task)
    with open(opt.data_cfg) as f:
        data_config = json.load(f)
    trainset_paths = data_config['train']
    dataset_root = data_config['root']
    transforms = T.Compose([T.ToTensor()])
    dataset = Dataset(opt,
                      dataset_root,
                      trainset_paths, (1088, 608),
                      augment=True,
                      transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')

    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    # Get dataloader

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        if epoch % 5 == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
    logger.close()
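These training entry points are normally driven by the repository's option parser; a minimal sketch, assuming the CenterNet/FairMOT-style opts().parse():

if __name__ == '__main__':
    opt = opts().parse()
    main(opt)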
Example #8
def test_single(img_path, dev):
    """
    :param img_path:
    :param dev:
    :return:
    """
    if not os.path.isfile(img_path):
        print('[Err]: invalid image path.')
        return

    # Head dimensions of the net
    heads = {'hm': 5, 'reg': 2, 'wh': 2, 'id': 128}

    # Load model and put to device
    net = create_model(arch='resdcn_18', heads=heads, head_conv=256)
    model_path = '/mnt/diskb/even/MCMOT/exp/mot/default/mcmot_last_det_resdcn_18.pth'
    net = load_model(model=net, model_path=model_path)
    net = net.to(dev)
    net.eval()
    print(net)

    # Read image
    img_0 = cv2.imread(img_path)  # BGR
    assert img_0 is not None, 'Failed to load ' + img_path

    # Padded resize
    h_in, w_in = 608, 1088  # (608, 1088) (320, 640)
    img, _, _, _ = letterbox(img=img_0, height=h_in, width=w_in)

    # Preprocess image: BGR -> RGB and H×W×C -> C×H×W
    img = img[:, :, ::-1].transpose(2, 0, 1)
    img = np.ascontiguousarray(img, dtype=np.float32)
    img /= 255.0

    # Convert to tensor and put to device
    blob = torch.from_numpy(img).unsqueeze(0).to(dev)

    with torch.no_grad():
        # Network output
        output = net.forward(blob)[-1]

        # Tracking output
        hm = output['hm'].sigmoid_()
        reg = output['reg']
        wh = output['wh']
        id_feature = output['id']
        id_feature = F.normalize(id_feature,
                                 dim=1)  # L2 normalization for feature vector

        # Decode output
        dets, inds, cls_inds_mask = mot_decode(hm, wh, reg, 5, False, 128)

        # Get ReID feature vector by object class
        cls_id_feats = []  # topK feature vectors of each object class
        for cls_id in range(5):  # cls_id starts from 0
            # get inds of each object class
            cls_inds = inds[:, cls_inds_mask[cls_id]]

            # gather feats for each object class
            cls_id_feature = _tranpose_and_gather_feat(id_feature,
                                                       cls_inds)  # inds: 1×128
            cls_id_feature = cls_id_feature.squeeze(0)  # n × FeatDim
            if dev == 'cpu':
                cls_id_feature = cls_id_feature.numpy()
            else:
                cls_id_feature = cls_id_feature.cpu().numpy()
            cls_id_feats.append(cls_id_feature)

        # Convert back to original image coordinate system
        height_0, width_0 = img_0.shape[:2]  # H, W of original input image
        dets = map2orig(dets, h_in // 4, w_in // 4, height_0, width_0,
                        5)  # translate and scale

        # Parse detections of each class
        dets_dict = defaultdict(list)
        for cls_id in range(5):  # cls_id starts from 0
            cls_dets = dets[cls_id]

            # filter out low conf score dets
            remain_inds = cls_dets[:, 4] > 0.4
            cls_dets = cls_dets[remain_inds]
            # cls_id_feature = cls_id_feats[cls_id][remain_inds]  # if need re-id
            dets_dict[cls_id] = cls_dets

    # Visualize detection results
    img_draw = plot_detects(img_0, dets_dict, 5, frame_id=0, fps=30.0)
    # cv2.imshow('Detection', img_draw)
    # cv2.waitKey()
    cv2.imwrite('/mnt/diskb/even/MCMOT/results/00000.jpg', img_draw)
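A hedged driver for test_single; the image path is a placeholder. Passing a torch.device works with the function's dev == 'cpu' check, since torch.device('cpu') == 'cpu' evaluates to True.

# Hypothetical usage: pick a device and run single-image detection.
dev = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
test_single('/path/to/test.jpg', dev)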
Example #9
def run(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    print('Setting up data...')
    Dataset = get_dataset(opt.dataset, opt.task,
                          opt.multi_scale)  # if opt.task == 'mot' -> JointDataset

    # choose which dataset to train on, e.g. '../src/lib/cfg/mot15.json'
    with open(opt.data_cfg) as f:
        data_config = json.load(f)
    trainset_paths = data_config['train']  # training set paths
    dataset_root = data_config['root']  # dataset root directory
    print("Dataset root: %s" % dataset_root)

    # Image data transformations
    transforms = T.Compose([T.ToTensor()])

    # Dataset
    dataset = Dataset(opt=opt,
                      root=dataset_root,
                      paths=trainset_paths,
                      img_size=opt.input_wh,
                      augment=True,
                      transforms=transforms)
    opt = opts().update_dataset_info_and_set_heads(opt, dataset)
    print("opt:\n", opt)

    logger = Logger(opt)

    # os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str  # multi-GPU training
    # print("opt.gpus_str: ", opt.gpus_str)

    opt.device = torch.device('cuda:0' if opt.gpus[0] >= 0 else 'cpu')  # select device

    #opt.device = device #NC UPDATE - fallback to original fairmot
    #opt.gpus = my_visible_devs #NC UPDATE

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)

    # Initialize the optimizer
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)

    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    # Get dataloader
    if opt.is_debug:
        if opt.multi_scale:
            train_loader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=opt.batch_size,
                shuffle=False,
                pin_memory=True,
                drop_last=True)  # in debug mode, leave num_workers at the default of 0
        else:
            train_loader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=opt.batch_size,
                shuffle=True,
                pin_memory=True,
                drop_last=True)  # in debug mode, leave num_workers at the default of 0
    else:
        if opt.multi_scale:
            train_loader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=opt.batch_size,
                shuffle=False,
                num_workers=opt.num_workers,
                pin_memory=True,
                drop_last=True)
        else:
            train_loader = torch.utils.data.DataLoader(
                dataset=dataset,
                batch_size=opt.batch_size,
                shuffle=True,
                num_workers=opt.num_workers,
                pin_memory=True,
                drop_last=True)

    print('Starting training...')
    Trainer = train_factory[opt.task]
    trainer = Trainer(opt=opt, model=model, optimizer=optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)
    # trainer.set_device(opt.gpus, opt.chunk_sizes, device) #NC UPDATE

    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'

        # Train an epoch
        log_dict_train, _ = trainer.train(epoch, train_loader)

        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))

        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
        else:  # mcmot_last_track or mcmot_last_det
            if opt.id_weight > 0:  # do tracking (detection and re-id)
                save_model(
                    os.path.join(opt.save_dir,
                                 'mcmot_last_track_' + opt.arch + '.pth'),
                    epoch, model, optimizer)
            else:  # only do detection
                save_model(
                    os.path.join(opt.save_dir,
                                 'mcmot_last_det_' + opt.arch + '.pth'), epoch,
                    model, optimizer)
        logger.write('\n')

        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)

            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)

            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        if epoch % 10 == 0:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
    logger.close()
Example #10
def gen_det(opt,
            batch_size=12,
            img_size=(1088, 608)):
    data_cfg = opt.data_cfg
    with open(data_cfg) as f:
        data_cfg_dict = json.load(f)
    test_path = data_cfg_dict['test']
    dataset_root = data_cfg_dict['root']
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda')
    else:
        opt.device = torch.device('cpu')
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    # model = torch.nn.DataParallel(model)
    model = model.to(opt.device)
    model.eval()
    # dummy_input = torch.rand(1, 3, 1088, 608).cuda()  # dummy input for graph visualization
    # with SummaryWriter(comment='model') as w:
    #     w.add_graph(model, dummy_input)
    # Get dataloader
    transforms = T.Compose([T.ToTensor()])
    dataset = DetDataset(dataset_root, test_path, img_size, augment=False, transforms=transforms)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
                                             num_workers=0, drop_last=False)
    seen = 0
    dataloader = tqdm(dataloader)
    for batch_i, (imgs, paths, shapes) in enumerate(dataloader):
        seen += batch_size
        if seen < 3148:
            continue
        path = paths[0]
        split = path.split("/")
        split[0] += "/"
        if "MOT16-03" in path:
            continue
        if "MOT16-01" in path:
            continue
        # if int(split[-1].strip(".jpg")) < 736:
        #     continue
        output = model(imgs.cuda())[-1]
        origin_shape = shapes[0]
        width = origin_shape[1]
        height = origin_shape[0]
        inp_height = img_size[1]
        inp_width = img_size[0]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {'c': c, 's': s,
                'out_height': inp_height // opt.down_ratio,
                'out_width': inp_width // opt.down_ratio}
        hm = output['hm'].sigmoid_()
        wh = output['wh']
        reg = output['reg'] if opt.reg_offset else None
        opt.K = 200
        detections, inds = mot_decode(hm, wh, reg=reg, cat_spec_wh=opt.cat_spec_wh, K=opt.K)
        # Compute average precision for each sample
        for si, _ in enumerate(imgs):
            seen += 1
            # path = paths[si]
            # img0 = cv2.imread(path)

            dets = detections[si]
            dets = dets.unsqueeze(0)
            dets = post_process(opt, dets, meta)
            dets = merge_outputs(opt, [dets])[1]
            if dets is None:
                continue
            path = paths[si]
            split = path.split("/")
            split[0] += "/"
            det_file = os.path.join(*split[:-2], "det", "FairMOT_det.txt")
            with open(det_file, "a+") as f:
                frame_id = int(split[-1].strip(".jpg"))
                img1 = cv2.imread(path)
                remain_inds = dets[:, 4] > 0.4
                dets = dets[remain_inds]
                xywh = xyxy2ct_xywh(dets[:, :4])

                for t in range(len(dets)):
                    x1 = dets[t, 0]
                    y1 = dets[t, 1]
                    x2 = dets[t, 2]
                    y2 = dets[t, 3]
                    f.write(
                        "%d,-1, %.2f, %.2f, %.2f, %.2f, %.2f, -1,-1,-1\n" % (
                            frame_id, xywh[t, 0], xywh[t, 1], xywh[t, 2], xywh[t, 3], dets[t, 4]))
                    cv2.rectangle(img1, (int(x1), int(y1)),
                                  (int(x2), int(y2)), (0, 255, 0), 4)
                cv2.imshow("", img1)
                cv2.waitKey(100)
                # cv2.imwrite('pred.jpg', img1)

    return None
Example #11
# NOTE: the original snippet omitted its header; the imports below are assumed.
from collections import OrderedDict
from types import MethodType

import torch
import torch.onnx as onnx
from torch.onnx import OperatorExportTypes
from lib.models.model import create_model, load_model
from lib.opts import opts

# pose_dla_forward and dlav0_forward are defined analogously in the source.

def resnet_dcn_forward(self, x):
    # Only the tail of this function survived in the source; the standard
    # PoseResNet backbone pass is assumed for the omitted part.
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.deconv_layers(x)
    ret = []  ## change dict to list
    for head in self.heads:
        ret.append(self.__getattr__(head)(x))
    return ret


forward = {
    'dla': pose_dla_forward,
    'dlav0': dlav0_forward,
    'resdcn': resnet_dcn_forward
}

# NOTE: in lib/opts.py, change add_argument('task', default='ctdet', ...) to
# add_argument('--task', default='ctdet', ...) so init() works without a positional task
opt = opts().init()
opt.arch = 'resdcn_18'
opt.heads = OrderedDict([('hm', 10), ('reg', 2), ('wh', 2)])
opt.head_conv = 256 if 'dla' in opt.arch else 64
print(opt)
model = create_model(opt.arch, opt.heads, opt.head_conv)
model.forward = MethodType(forward[opt.arch.split('_')[0]], model)
load_model(model, 'ctdet_bdd_resnet18_512.pth')
model.eval()
model.cuda()
input = torch.zeros([1, 3, 512, 512]).cuda()
onnx.export(model,
            input,
            "ctdet_bdd_resnet18_512.onnx",
            verbose=True,
            operator_export_type=OperatorExportTypes.ONNX)
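An optional sanity check on the exported graph, as a sketch assuming onnxruntime is installed; note that custom ops such as DCN may fail to load in vanilla onnxruntime, in which case this check does not apply.

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('ctdet_bdd_resnet18_512.onnx')
feed = {sess.get_inputs()[0].name: np.zeros((1, 3, 512, 512), np.float32)}
outputs = sess.run(None, feed)
print([o.shape for o in outputs])  # one output per head: hm, reg, wh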
Example #12
def main(opt):
    torch.manual_seed(opt.seed)
    # benchmark=True lets cuDNN auto-tune the most efficient algorithms
    # for the current configuration to speed up training
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test

    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    def adjust_learning_rate(optimizer, epoch):
        # warmup for the first 5 epochs
        if epoch < 5:
            lr = opt.lr * ((epoch + 1) / 5)
        else:
            # cosine learning-rate schedule
            lr = opt.lr * 0.5 * (1 + math.cos(epoch * math.pi / 20))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    # optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    optimizer = torch.optim.SGD(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    # num_workers=0 loads data in the main process; with num_workers > 0, worker
    # subprocesses load the data, which can hang training here (GPU util drops to 0)
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        adjust_learning_rate(optimizer, epoch)
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % 2 == 0 and epoch > 10:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)

                # test(opt)
                # opt.model = None
        if epoch > 20:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
        # elif 80 < epoch <=100 and epoch % 3 == 0:
        #     save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
        #         epoch, model, optimizer)
        logger.write('\n')
        # if epoch in opt.lr_step:
        #     save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
        #         epoch, model, optimizer)
        #     lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
        #     print('Drop LR to', lr)
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] = lr
        print('Epoch finished')
    logger.close()
Example #13
def main(opt):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(opt.dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=0,
                                             pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_loader)
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                log_dict_val, preds = trainer.val(epoch, val_loader)
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example #14
def main(opt):
    # opt.seed defaults to 317; calling torch.manual_seed makes the generated
    # random numbers identical on every run
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    Dataset = get_dataset(
        opt.dataset, opt.task
    )  # e.g. opt.dataset = coco, opt.task = ctdet (| ddd | multi_pose | exdet)
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    print(opt)

    logger = Logger(opt)

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    start_epoch = 0
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]
    trainer = Trainer(opt, model, optimizer)
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up data...')
    val_loader = torch.utils.data.DataLoader(Dataset(opt, 'val'),
                                             batch_size=1,
                                             shuffle=False,
                                             num_workers=1,
                                             pin_memory=True)  # modified by zy
    # val_loader = torch.utils.data.DataLoader(Dataset(opt, 'test'), batch_size=1, shuffle=False, num_workers=1,pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_loader)
        val_loader.dataset.run_eval(preds, opt.save_dir)
        return

    train_loader = torch.utils.data.DataLoader(Dataset(opt, 'train'),
                                               batch_size=opt.batch_size,
                                               shuffle=True,
                                               num_workers=opt.num_workers,
                                               pin_memory=True,
                                               drop_last=True)

    output_choice_log = '/home/zy/zy/2new_network/CenterNet-master/output_choice.log'
    if os.path.exists(output_choice_log):
        os.remove(output_choice_log)

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        try:
            # trainer.train() delegates to self.run_epoch('train', epoch, data_loader)
            log_dict_train, _ = trainer.train(epoch, train_loader)
        except Exception as e:  # on an exception, log it and skip this epoch
            print('Error_train!!!', e)
            print(traceback.format_exc())
            continue
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
            save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),
                       epoch, model, optimizer)
            with torch.no_grad():
                try:
                    log_dict_val, preds = trainer.val(epoch, val_loader)
                except Exception as e:  # on an exception, log it and skip validation
                    print('Error_val!!!', e)
                    print(traceback.format_exc())
                    continue
            for k, v in log_dict_val.items():
                logger.scalar_summary('val_{}'.format(k), v, epoch)
                logger.write('{} {:8f} | '.format(k, v))
            if log_dict_val[opt.metric] < best:
                best = log_dict_val[opt.metric]
                save_model(os.path.join(opt.save_dir, 'model_best.pth'), epoch,
                           model)
        else:
            save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                       model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:
            save_model(
                os.path.join(opt.save_dir, 'model_{}.pth'.format(epoch)),
                epoch, model, optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()
Example #15
# encoding=utf-8
import sys

sys.path.append('/mnt/diskb/even/MCMOT/src')
sys.path.append('.')

import pytorch_to_caffe
# from torch.autograd import Variable
import torch
from lib.models.model import create_model, load_model

if __name__ == '__main__':
    heads = {'hm': 5, 'wh': 2, 'reg': 2, 'id': 128}
    net = create_model(arch='resdcn_18', heads=heads, head_conv=-1)
    model_path = '/mnt/diskb/even/MCMOT/exp/mot/default/mcmot_last_track_resdcn_18.pth'
    net = load_model(model=net, model_path=model_path)
    net.eval()

    input = torch.ones([1, 3, 608, 1088])
    name = 'mcmot_resdcn_18'

    pytorch_to_caffe.trans_net(net, input, name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
Example #16
# encoding=utf-8
import sys

sys.path.append('/mnt/diskb/even/MCMOT/src')
sys.path.append('.')
import pytorch_to_caffe
# from torch.autograd import Variable
import torch
from lib.models.model import create_model, load_model

if __name__ == '__main__':
    heads = {'hm': 5, 'wh': 2, 'reg': 2, 'id': 128}
    net = create_model(arch='hrnet_18', heads=heads, head_conv=-1)
    model_path = '/mnt/diskc/maqiao/even/MCMOT/exp/mot/default/mcmot_last_det_hrnet_18_de_conv.pth'
    net = load_model(model=net, model_path=model_path)
    net.eval()

    input = torch.ones([1, 3, 608, 1088])
    name = 'mcmot_hrnet18'

    pytorch_to_caffe.trans_net(net, input, name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
Example #17
def test_det(
        opt,
        batch_size=12,
        img_size=(1088, 608),
        iou_thres=0.5,
        print_interval=40,
):
    data_cfg = opt.data_cfg
    with open(data_cfg) as f:
        data_cfg_dict = json.load(f)
    nC = 1
    test_path = data_cfg_dict['test']
    dataset_root = data_cfg_dict['root']
    if opt.gpus[0] >= 0:
        opt.device = torch.device('cuda')
    else:
        opt.device = torch.device('cpu')
    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)
    model = load_model(model, opt.load_model)
    # model = torch.nn.DataParallel(model)
    model = model.to(opt.device)
    model.eval()

    # Get dataloader
    transforms = T.Compose([T.ToTensor()])
    dataset = DetDataset(dataset_root,
                         test_path,
                         img_size,
                         augment=False,
                         transforms=transforms)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8,
                                             drop_last=False,
                                             collate_fn=collate_fn)
    mean_mAP, mean_R, mean_P, seen = 0.0, 0.0, 0.0, 0
    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))
    outputs, mAPs, mR, mP, TP, confidence, pred_class, target_class, jdict = \
        [], [], [], [], [], [], [], [], []
    AP_accum, AP_accum_count = np.zeros(nC), np.zeros(nC)
    for batch_i, (imgs, targets, paths, shapes,
                  targets_len) in enumerate(dataloader):
        t = time.time()
        # seen += batch_size

        output = model(imgs.cuda())[-1]
        origin_shape = shapes[0]
        width = origin_shape[1]
        height = origin_shape[0]
        inp_height = img_size[1]
        inp_width = img_size[0]
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(float(inp_width) / float(inp_height) * height, width) * 1.0
        meta = {
            'c': c,
            's': s,
            'out_height': inp_height // opt.down_ratio,
            'out_width': inp_width // opt.down_ratio
        }
        hm = output['hm'].sigmoid_()
        wh = output['wh']
        reg = output['reg'] if opt.reg_offset else None
        opt.K = 200
        detections, inds = mot_decode(hm,
                                      wh,
                                      reg=reg,
                                      cat_spec_wh=opt.cat_spec_wh,
                                      K=opt.K)
        # Compute average precision for each sample
        targets = [targets[i][:int(l)] for i, l in enumerate(targets_len)]
        for si, labels in enumerate(targets):
            seen += 1
            # path = paths[si]
            # img0 = cv2.imread(path)
            dets = detections[si]
            dets = dets.unsqueeze(0)
            dets = post_process(opt, dets, meta)
            dets = merge_outputs(opt, [dets])[1]

            # remain_inds = dets[:, 4] > opt.det_thres
            # dets = dets[remain_inds]
            if dets is None:
                # If there are labels but no detections mark as zero AP
                if labels.size(0) != 0:
                    mAPs.append(0), mR.append(0), mP.append(0)
                continue

            # If no labels add number of detections as incorrect
            correct = []
            if labels.size(0) == 0:
                # correct.extend([0 for _ in range(len(detections))])
                mAPs.append(0), mR.append(0), mP.append(0)
                continue
            else:
                target_cls = labels[:, 0]

                # Extract target boxes as (x1, y1, x2, y2)
                target_boxes = ct_xywh2xyxy(labels[:, 2:6])
                target_boxes[:, 0] *= width
                target_boxes[:, 2] *= width
                target_boxes[:, 1] *= height
                target_boxes[:, 3] *= height
                '''
                path = paths[si]
                img0 = cv2.imread(path)
                img1 = cv2.imread(path)
                for t in range(len(target_boxes)):
                    x1 = target_boxes[t, 0]
                    y1 = target_boxes[t, 1]
                    x2 = target_boxes[t, 2]
                    y2 = target_boxes[t, 3]
                    cv2.rectangle(img0, (x1, y1), (x2, y2), (0, 255, 0), 4)
                cv2.imwrite('gt.jpg', img0)
                for t in range(len(dets)):
                    x1 = dets[t, 0]
                    y1 = dets[t, 1]
                    x2 = dets[t, 2]
                    y2 = dets[t, 3]
                    cv2.rectangle(img1, (x1, y1), (x2, y2), (0, 255, 0), 4)
                cv2.imwrite('pred.jpg', img1)
                abc = ace
                '''

                detected = []
                for *pred_bbox, conf in dets:
                    obj_pred = 0
                    pred_bbox = torch.FloatTensor(pred_bbox).view(1, -1)
                    # Compute iou with target boxes
                    iou = bbox_iou(pred_bbox, target_boxes, x1y1x2y2=True)[0]
                    # Extract index of largest overlap
                    best_i = np.argmax(iou)
                    # If overlap exceeds threshold and classification is correct mark as correct
                    if iou[best_i] > iou_thres and obj_pred == labels[
                            best_i, 0] and best_i not in detected:
                        correct.append(1)
                        detected.append(best_i)
                    else:
                        correct.append(0)

            # Compute Average Precision (AP) per class
            AP, AP_class, R, P = ap_per_class(
                tp=correct,
                conf=dets[:, 4],
                pred_cls=np.zeros_like(dets[:, 4]),  # detections[:, 6]
                target_cls=target_cls)

            # Accumulate AP per class
            AP_accum_count += np.bincount(AP_class, minlength=nC)
            AP_accum += np.bincount(AP_class, minlength=nC, weights=AP)

            # Compute mean AP across all classes in this image, and append to image list
            mAPs.append(AP.mean())
            mR.append(R.mean())
            mP.append(P.mean())

            # Means of all images
            mean_mAP = np.sum(mAPs) / (AP_accum_count + 1E-16)
            mean_R = np.sum(mR) / (AP_accum_count + 1E-16)
            mean_P = np.sum(mP) / (AP_accum_count + 1E-16)

        if batch_i % print_interval == 0:
            # Print image mAP and running mean mAP
            print(('%11s%11s' + '%11.3g' * 4 + 's') %
                  (seen, dataloader.dataset.nF, mean_P, mean_R, mean_mAP,
                   time.time() - t))
    # Print mAP per class
    print('%11s' * 5 % ('Image', 'Total', 'P', 'R', 'mAP'))

    print('AP: %-.4f\n\n' % (AP_accum[0] / (AP_accum_count[0] + 1E-16)))

    # Return mAP
    return mean_mAP, mean_R, mean_P
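A hedged invocation of test_det, assuming opt comes from the project's opts() parser with opt.load_model pointing at a trained checkpoint:

opt = opts().init()
mean_mAP, mean_R, mean_P = test_det(opt, batch_size=12, img_size=(1088, 608))
print('mAP:', mean_mAP, 'recall:', mean_R, 'precision:', mean_P)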
Example #18
def main(opt, opt_t):
    torch.manual_seed(opt.seed)
    torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test
    if opt.target_dataset:
        Dataset_target = get_dataset(opt_t.target_dataset, opt_t.task)
        opt_t = opts().update_dataset_info_and_set_heads(
            opt_t, Dataset_target)  # target dataset
    Dataset_source = get_dataset(opt.source_dataset, opt.task)
    opt = opts().update_dataset_info_and_set_heads(
        opt, Dataset_source)  # source dataset
    print(opt)

    logger = Logger(opt)  # record

    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')

    print('Creating model...')
    model = create_model(opt.arch, opt.heads, opt.head_conv)  # create model
    optimizer = torch.optim.Adam(model.parameters(),
                                 opt.lr)  # create optimizer
    start_epoch = 0
    if opt.load_model != '':  # load model
        model, optimizer, start_epoch = load_model(model, opt.load_model,
                                                   optimizer, opt.resume,
                                                   opt.lr, opt.lr_step)

    Trainer = train_factory[opt.task]  # set trainer function
    trainer = Trainer(opt, model, optimizer)  # initial trainer
    trainer.set_device(opt.gpus, opt.chunk_sizes, opt.device)

    print('Setting up source_val data...')
    val_source_loader = torch.utils.data.DataLoader(Dataset_source(opt, 'val'),
                                                    batch_size=1,
                                                    shuffle=False,
                                                    num_workers=0,
                                                    pin_memory=True)

    if opt.test:
        _, preds = trainer.val(0, val_source_loader)
        val_source_loader.dataset.run_eval(preds, opt.save_dir)
        return

    # source loader
    print('Setting up source_train data...')
    train_source_loader = torch.utils.data.DataLoader(
        Dataset_source(opt, 'train'),  # modify SOURCE dataset parameters
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        pin_memory=True,
        drop_last=True)

    if opt.target_dataset:
        # target loader
        print('Setting up target_train data...')
        train_target_loader = torch.utils.data.DataLoader(
            Dataset_target(opt_t, 'train'),  # modify TARGET dataset parameters
            batch_size=opt_t.batch_size,
            shuffle=True,
            num_workers=opt_t.num_workers,
            pin_memory=True,
            drop_last=True)
        print('DA MODE')
    else:
        train_target_loader = None

    print('Starting training...')
    best = 1e10
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        mark = epoch if opt.save_all else 'last'
        log_dict_train, _ = trainer.train(epoch, train_source_loader,
                                          train_target_loader)  # do train
        logger.write('epoch: {} |'.format(epoch))
        for k, v in log_dict_train.items():  # log information
            logger.scalar_summary('train_{}'.format(k), v, epoch)
            logger.write('{} {:8f} | '.format(k, v))
        # if opt.val_intervals > 0 and epoch % opt.val_intervals == 0:
        #     save_model(os.path.join(opt.save_dir, 'model_{}.pth'.format(mark)),  # save last-model
        #                epoch, model, optimizer)
        #     with torch.no_grad():
        #         log_dict_val, preds = trainer.val(epoch, val_source_loader)  # val-set loss
        #     for k, v in log_dict_val.items():
        #         logger.scalar_summary('val_{}'.format(k), v, epoch)
        #         logger.write('{} {:8f} | '.format(k, v))
        #     if log_dict_val[opt.metric] < best:
        #         best = log_dict_val[opt.metric]
        #         save_model(os.path.join(opt.save_dir, 'model_best.pth'),  # save best-model
        #                    epoch, model)
        # else:
        # save the last model once per epoch (not once per logged metric)
        save_model(os.path.join(opt.save_dir, 'model_last.pth'), epoch,
                   model, optimizer)
        logger.write('\n')
        if epoch in opt.lr_step:  # update learning rate
            save_model(
                os.path.join(
                    opt.save_dir,
                    'model_{}.pth'.format(epoch)),  # save lr_step-model
                epoch,
                model,
                optimizer)
            lr = opt.lr * (0.1**(opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    logger.close()