Example #1
def load_net(testiter, cfg_name, data_dir, cache_dir, cuda_id=0):
    cfg_file = os.path.join(system_configs.config_dir, cfg_name + ".json")
    with open(cfg_file, "r") as f:
        configs = json.load(f)
    configs["system"]["snapshot_name"] = cfg_name
    configs["system"]["data_dir"] = data_dir
    configs["system"]["cache_dir"] = cache_dir
    configs["system"]["result_dir"] = 'result_dir'
    configs["system"]["tar_data_dir"] = "Cls"
    system_configs.update_config(configs["system"])

    train_split = system_configs.train_split
    val_split = system_configs.val_split
    test_split = system_configs.test_split

    split = {
        "training": train_split,
        "validation": val_split,
        "testing": test_split
    }["validation"]

    test_iter = system_configs.max_iter if testiter is None else testiter
    print("loading parameters at iteration: {}".format(test_iter))
    dataset = system_configs.dataset
    db = datasets[dataset](configs["db"], split)
    print("building neural network...")
    nnet = NetworkFactory(db)
    print("loading parameters...")
    nnet.load_params(test_iter)
    if torch.cuda.is_available():
        nnet.cuda(cuda_id)
    nnet.eval_mode()
    return db, nnet
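
A minimal usage sketch for the loader above; the config name and directories are placeholder values and depend on the local repository layout:

# Hypothetical call: load the validation split and a trained network at the latest saved iteration.
db, nnet = load_net(testiter=None, cfg_name="CenterNet-104",
                    data_dir="./data", cache_dir="./cache", cuda_id=0)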
Example #2
def get_lane_model():
    input_size = [360, 640]
    mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
    std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
    with open('config\\LSTR.json', "r") as f:
        configs = json.load(f)
    configs["system"]["snapshot_name"] = 'LSTR'
    system_configs.update_config(configs["system"])
    nnet = NetworkFactory()

    with open('cache\\nnet\\LSTR\\LSTR_500000.pkl', "rb") as f:
        params = torch.load(f)
        model_dict = nnet.model.state_dict()
        if len(params) != len(model_dict):
            pretrained_dict = {
                k: v
                for k, v in params.items() if k in model_dict
            }
        else:
            pretrained_dict = params
        model_dict.update(pretrained_dict)

        nnet.model.load_state_dict(model_dict)
    nnet.cuda()
    nnet.eval_mode()
    return nnet, input_size, mean, std
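
A minimal sketch of how the returned values might be used to preprocess an image before inference; the input file name and the plain mean/std normalization are assumptions, not part of the original snippet:

# Hypothetical preprocessing with the values returned by get_lane_model().
nnet, input_size, mean, std = get_lane_model()
frame = cv2.imread("example.jpg")                         # assumed input image
img = cv2.resize(frame, (input_size[1], input_size[0]))   # resize to (width, height) = (640, 360)
img = img.astype(np.float32) / 255.0
img = (img - mean) / std                                  # normalize with the dataset statistics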
Example #3
    def __init__(self, cfg_file, iter=10000, suffix=None):
        from test.centernet import inference

        model = importlib.import_module('models.%s' % cfg_file).model
        if suffix is None:
            cfg_path = os.path.join(system_configs.config_dir,
                                    "%s.json" % cfg_file)
        else:
            cfg_path = os.path.join(system_configs.config_dir,
                                    "%s-%s.json" % (cfg_file, suffix))
        model_path = get_file_path("..", "cache", "nnet", cfg_file,
                                   "%s_%d.pkl" % (cfg_file, iter))
        cfg_sys, cfg_db = load_cfg(cfg_path)
        cfg_sys["snapshot_name"] = cfg_file
        system_configs.update_config(cfg_sys)
        dataset = system_configs.dataset
        train_split = system_configs.train_split
        val_split = system_configs.val_split
        test_split = system_configs.test_split

        split = {
            "training": train_split,
            "validation": val_split,
            "testing": test_split
        }["validation"]

        demo = datasets[dataset](cfg_db, split)

        centernet = load_nnet(demo)
        super(CenterNet, self).__init__(demo,
                                        centernet,
                                        inference,
                                        model=model_path)
Example #4
    def __init__(self):
        model = "./cache/nnet/CenterNet-104/CenterNet_480000.pkl"
        json_file = "./config/CenterNet-104.json"

        with open(json_file, "r") as f:
            configs = json.load(f)

        configs["system"]["snapshot_name"] = "CenterNet-104"
        system_configs.update_config(
            configs["system"]
        )  # Update config.py based on retrieved 'system' parameters
        db_configs.update_config(configs["db"])

        self.nnet = NetworkFactory()
        self.nnet.load_params("480000")

        #drawer = Drawer()

        self.nnet.cuda()
        self.nnet.eval_mode()
Example #5
    args = parse_args()

    if args.suffix is None:
        cfg_file = os.path.join(system_configs.config_dir,
                                args.cfg_file + ".json")
    else:
        cfg_file = os.path.join(system_configs.config_dir,
                                args.cfg_file + "-{}.json".format(args.suffix))
    print("cfg_file: {}".format(cfg_file))

    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = "-".join(
        args.cfg_file.split("-")[:2])  # get the model name
    system_configs.update_config(configs["system"])
    system_configs.update_config({
        "categories": configs["db"]["categories"],
        "current_split": args.split
    })

    train_split = system_configs.train_split
    val_split = system_configs.val_split
    test_split = system_configs.test_split

    split = {
        "train": train_split,
        "val": val_split,
        "test": test_split
    }[args.split]
Example #6
File: train.py  Project: yawudede/CPNDet
    # terminating data fetching processes
    for training_task in training_tasks:
        training_task.terminate()
    for validation_task in validation_tasks:
        validation_task.terminate()


if __name__ == "__main__":
    args = parse_args()

    cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = args.cfg_file
    system_configs.update_config(configs["system"])

    train_split = system_configs.train_split
    val_split = system_configs.val_split

    print("loading all datasets...")
    dataset = system_configs.dataset
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads
    print("using {} threads".format(threads))
    training_dbs = [
        datasets[dataset](configs["db"], train_split) for _ in range(threads)
    ]
    validation_db = datasets[dataset](configs["db"], val_split)

    print("system config...")
Example #7
def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.

    cfg_file = os.path.join(
        "/data1/hhq/project/extremenet-inference/config/ExtremeNet.json")
    print("cfg_file: {}".format(cfg_file))

    with open(cfg_file, "r") as f:
        configs = json.load(f)

    configs["system"]["snapshot_name"] = "ExtremeNet"
    system_configs.update_config(configs["system"])
    print("system config...")

    K = configs["db"]["top_k"]
    aggr_weight = configs["db"]["aggr_weight"]
    scores_thresh = configs["db"]["scores_thresh"]
    center_thresh = configs["db"]["center_thresh"]
    suppres_ghost = True
    nms_kernel = 3

    scales = configs["db"]["test_scales"]
    weight_exp = 8
    categories = configs["db"]["categories"]
    nms_threshold = configs["db"]["nms_threshold"]
    max_per_image = configs["db"]["max_per_image"]
    nms_algorithm = {
        "nms": 0,
        "linear_soft_nms": 1,
        "exp_soft_nms": 2
    }["exp_soft_nms"]
    nnet = NetworkFactory(None)
    nnet.load_pretrained_params(
        "/data1/hhq/project/extremenet-inference/model/ExtremeNet_250000.pkl")
    nnet.cuda()
    nnet.eval_mode()

    fps = FPS().start()
    mean = np.array([0.40789654, 0.44719302, 0.47026115], dtype=np.float32)
    std = np.array([0.28863828, 0.27408164, 0.27809835], dtype=np.float32)
    top_bboxes = {}

    while True:
        fps.update()
        frame = input_q.get()
        # image = cv2.imread("/data/project/extremenet-inference/inputs/16004479832_a748d55f21_k.jpg")
        height, width = frame[1].shape[0:2]
        detections = []

        for scale in scales:
            new_height = int(height * scale)
            new_width = int(width * scale)
            new_center = np.array([new_height // 2, new_width // 2])

            inp_height = new_height | 127
            inp_width = new_width | 127

            images = np.zeros((1, 3, inp_height, inp_width), dtype=np.float32)
            ratios = np.zeros((1, 2), dtype=np.float32)
            borders = np.zeros((1, 4), dtype=np.float32)
            sizes = np.zeros((1, 2), dtype=np.float32)

            out_height, out_width = (inp_height + 1) // 4, (inp_width + 1) // 4
            height_ratio = out_height / inp_height
            width_ratio = out_width / inp_width

            resized_image = cv2.resize(frame[1], (new_width, new_height))
            resized_image, border, offset = crop_image(resized_image,
                                                       new_center,
                                                       [inp_height, inp_width])

            resized_image = resized_image / 255.
            normalize_(resized_image, mean, std)

            images[0] = resized_image.transpose((2, 0, 1))
            borders[0] = border
            sizes[0] = [int(height * scale), int(width * scale)]
            ratios[0] = [height_ratio, width_ratio]

            images = np.concatenate((images, images[:, :, :, ::-1]), axis=0)
            images = torch.from_numpy(images)
            dets = kp_decode(nnet,
                             images,
                             K,
                             aggr_weight=aggr_weight,
                             scores_thresh=scores_thresh,
                             center_thresh=center_thresh,
                             kernel=nms_kernel,
                             debug=True)
            dets = dets.reshape(2, -1, 14)
            dets[1, :, [0, 2]] = out_width - dets[1, :, [2, 0]]
            dets[1, :, [5, 7, 9, 11]] = out_width - dets[1, :, [5, 7, 9, 11]]
            dets[1, :, [7, 8, 11, 12]] = dets[1, :, [11, 12, 7, 8]].copy()
            dets = dets.reshape(1, -1, 14)

            _rescale_dets(dets, ratios, borders, sizes)
            _rescale_ex_pts(dets, ratios, borders, sizes)
            dets[:, :, 0:4] /= scale
            dets[:, :, 5:13] /= scale
            detections.append(dets)

        detections = np.concatenate(detections, axis=1)

        classes = detections[..., -1]
        classes = classes[0]
        detections = detections[0]

        # reject detections with negative scores
        keep_inds = (detections[:, 4] > 0)
        detections = detections[keep_inds]
        classes = classes[keep_inds]

        image_id = 0  # image ids
        top_bboxes[image_id] = {}
        for j in range(categories):
            keep_inds = (classes == j)
            top_bboxes[image_id][j + 1] = \
                detections[keep_inds].astype(np.float32)
            soft_nms(top_bboxes[image_id][j + 1],
                     Nt=nms_threshold,
                     method=nms_algorithm)

        scores = np.hstack(
            [top_bboxes[image_id][j][:, 4] for j in range(1, categories + 1)])
        if len(scores) > max_per_image:
            kth = len(scores) - max_per_image
            thresh = np.partition(scores, kth)[kth]
            for j in range(1, categories + 1):
                keep_inds = (top_bboxes[image_id][j][:, 4] >= thresh)
                top_bboxes[image_id][j] = top_bboxes[image_id][j][keep_inds]

        if suppres_ghost:
            for j in range(1, categories + 1):
                n = len(top_bboxes[image_id][j])
                for k in range(n):
                    inside_score = 0
                    if top_bboxes[image_id][j][k, 4] > 0.2:
                        for t in range(n):
                            if _box_inside(top_bboxes[image_id][j][t],
                                           top_bboxes[image_id][j][k]):
                                inside_score += top_bboxes[image_id][j][t, 4]
                        if inside_score > top_bboxes[image_id][j][k, 4] * 3:
                            top_bboxes[image_id][j][k, 4] /= 2

        # plot bound box and oct mask
        color_list = colormap(rgb=True)
        mask_color_id = 0
        # image = cv2.imread("/data/project/extremenet-inference/inputs/16004479832_a748d55f21_k.jpg")
        image = frame[1]
        input_image = image.copy()
        mask_image = image.copy()
        bboxes = {}
        for j in range(1, categories + 1):
            keep_inds = (top_bboxes[image_id][j][:, 4] > 0.5)
            cat_name = class_name[j]
            for bbox in top_bboxes[image_id][j][keep_inds]:
                sc = bbox[4]
                ex = bbox[5:13].astype(np.int32).reshape(4, 2)
                bbox = bbox[0:4].astype(np.int32)
                txt = '{}{:.2f}'.format(cat_name, sc)
                color_mask = color_list[mask_color_id % len(color_list), :3]
                mask_color_id += 1
                image = vis_bbox(
                    image,
                    (bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]))
                image = vis_class(image, (bbox[0], bbox[1] - 2), txt)
                image = vis_octagon(image, ex, color_mask)
                image = vis_ex(image, ex, color_mask)

        output_q.put((frame[0], image))
    fps.stop()
Example #8
    print("Video File:" + str(args.file_dir))

    json_file = os.path.join(system_configs.config_dir,
                             args.json_file + ".json")

    print("json_file: {}".format(json_file))

    with open(json_file, "r") as f:
        configs = json.load(
            f)  # Read .json file to retrieve 'system' and 'db' parameters

    configs["system"][
        "snapshot_name"] = args.json_file  # Insert model's name into configuration file
    system_configs.update_config(
        configs["system"]
    )  # Update config.py based on retrieved 'system' parameters
    db_configs.update_config(
        configs["db"])  # Update db/base.py based on retrieved 'db' parameters

    print("system config...")
    pprint.pprint(system_configs.full)  # Show 'system' parameters in terminal

    print("db config...")
    pprint.pprint(db_configs.full)  # Show 'db' parameters in terminal

    print("loading parameters at iteration: {}".format(
        args.testiter))  # Show args.testiter in terminal

    print("building neural network...")
    nnet = NetworkFactory()  # Initialise CenterNet's neural network
Example #9
        training_task.terminate()
    for validation_task in validation_tasks:
        validation_task.terminate()


if __name__ == "__main__":
    args = parse_args()  # parse command-line arguments

    cfg_file = os.path.join(
        system_configs.config_dir,
        args.cfg_file + ".json")  # path to the config json, e.g. config/CenterNet-xxx.json
    with open(cfg_file, "r") as f:  # read the json file
        configs = json.load(f)

    configs["system"]["snapshot_name"] = args.cfg_file  # 添加snapshot_name
    system_configs.update_config(configs["system"])  # 添加一项后,更新json

    train_split = system_configs.train_split  # "trainval"
    val_split = system_configs.val_split  # "minival"

    print("loading all datasets...")
    dataset = system_configs.dataset  # None here; reportedly defaults to COCO
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads  # number of worker threads, default 4
    print("using {} threads".format(threads))
    training_dbs = [
        datasets[dataset](configs["db"], train_split) for _ in range(threads)
    ]  # prints "start prefetching data..." once per thread; builds the training datasets
    validation_db = datasets[dataset](
        configs["db"], val_split
    )  # prints "start prefetching data..." once; builds the validation set (image loading, annotation parsing, preprocessing, etc.)
Example #10
                ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
            return ap

        print("The final evaluated AP: {}".format(voc_ap(rec, pre)))


if __name__ == '__main__':
    import cv2
    os.chdir('../')

    cfg_file = os.path.join(system_configs.config_dir, 'CenterNet-52.json')
    with open(cfg_file, 'r') as f:
        configs = json.load(f)

    configs['system']['snapshot_name'] = 'CenterNet-52'
    system_configs.update_config(configs['system'])

    val_split = system_configs.val_split
    val_db = CityPerson(configs['db'], val_split)

    ind = 1
    img_file = val_db.image_file(ind)
    detections = val_db.detections(ind)
    img = cv2.imread(img_file)

    for d in detections:
        cv2.rectangle(img, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])),
                      color=(0, 0, 255))

    cv2.imshow('test', img)
    cv2.waitKey(0)
Example #11
    for training_task in training_tasks:
        training_task.terminate()
    for validation_task in validation_tasks:
        validation_task.terminate()


if __name__ == "__main__":
    args = parse_args()  # parse command-line arguments
    cfg_file = os.path.join(system_configs.config_dir,
                            args.cfg_file + ".json")  # CenterNet config file (model depth, categories)
    with open(cfg_file, "r") as f:
        configs = json.load(f)  # load the config file

    configs["system"][
        "snapshot_name"] = args.cfg_file  # name of the current model, e.g. CenterNet-52, CenterNet-104
    system_configs.update_config(configs["system"])  # update the system config
    train_split = system_configs.train_split  # training split
    val_split = system_configs.val_split  # validation split
    # load all datasets
    print("loading all datasets...")
    dataset = system_configs.dataset
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads
    print("using {} threads".format(threads))
    training_dbs = [
        datasets[dataset](configs["db"], train_split) for _ in range(threads)
    ]
    validation_db = datasets[dataset](configs["db"], val_split)
    print("system config...")
    pprint.pprint(system_configs.full)  # print the system config
    print("db config...")
Example #12
    for training_task in training_tasks:
        training_task.terminate()
    for validation_task in validation_tasks:
        validation_task.terminate()

if __name__ == "__main__":
    args = parse_args()

    # load default setting
    cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
    with open(cfg_file, "r") as f:
        configs = json.load(f)
            
    configs["system"]["snapshot_name"] = "-".join(args.cfg_file.split("-")[:2]) # get the model name
    # update the params
    system_configs.update_config(configs["system"])
    system_configs.update_config({"categories":configs["db"]["categories"]})
    train_split = system_configs.train_split
    val_split   = system_configs.val_split

    print("loading all datasets...")
    dataset = system_configs.dataset
    # threads = max(torch.cuda.device_count() * 2, 4)
    threads = args.threads
    print("using {} threads".format(threads))
    training_dbs  = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
    validation_db = datasets[dataset](configs["db"], val_split)

    print("system config...")
    pprint.pprint(system_configs.full)