Example #1
    def __init__(self, configer):
        self.configer = configer

        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None
Example #2
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = DataLoader(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

        self._init_model()
Example #3
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_schedule_loss = AverageMeter()
        self.train_losses = AverageMeter()
        self.train_loss_heatmap = AverageMeter()
        self.train_loss_associate = AverageMeter()
        self.val_losses = AverageMeter()
        self.val_loss_heatmap = AverageMeter()
        self.val_loss_associate = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = LossManager(configer)
        self.pose_model_manager = ModelManager(configer)
        self.pose_data_loader = DataLoader(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self.runner_state = dict()

        self._init_model()
Example #4
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.train_loss_heatmap = AverageMeter()
        self.train_loss_associate = AverageMeter()
        self.val_losses = AverageMeter()
        self.val_loss_heatmap = AverageMeter()
        self.val_loss_associate = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = PoseLossManager(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.data_transformer = DataTransformer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

        self._init_model()
Example #5
    def __init__(self, configer):
        self.configer = configer

        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None
Example #6
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None
Example #7
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

        self._init_model()
Example #8
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None
Example #9
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.val_loss_heatmap = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = PoseLossManager(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
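
The constructors above track timing and loss statistics with AverageMeter objects (batch_time, data_time, train_losses, ...), and the training loops further down read their .val, .avg and .sum fields when logging. The helper itself is not part of these snippets; a minimal sketch of the usual implementation, assuming the update(value, n=1) interface used below, could look like this:

# Hypothetical minimal AverageMeter matching the .val / .avg / .sum usage in the
# examples below; not taken from the original repository.
class AverageMeter(object):
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # running sum of value * n
        self.count = 0    # number of accumulated samples
        self.avg = 0.0    # running mean, i.e. sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count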
Example #10
class CapsulePoseTest(object):
    def __init__(self, configer):
        self.configer = configer

        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

    def init_model(self):
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        cur_img_rgb = ImageHelper.resize(ori_img_rgb,
                                         self.configer.get(
                                             'data', 'input_size'),
                                         interpolation=Image.CUBIC)

        ori_img_bgr = ImageHelper.bgr2rgb(ori_img_rgb)
        paf_avg, heatmap_avg, partmap_avg = self.__get_paf_and_heatmap(
            cur_img_rgb)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(
            cur_img_rgb, paf_avg, partmap_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k,
                                               all_peaks)
        json_dict = self.__get_info_tree(cur_img_rgb, subset, candidate)
        for i in range(len(json_dict['objects'])):
            for index in range(len(json_dict['objects'][i]['keypoints'])):
                if json_dict['objects'][i]['keypoints'][index][2] == -1:
                    continue

                json_dict['objects'][i]['keypoints'][index][0] *= (
                    ori_img_rgb.shape[1] / cur_img_rgb.shape[1])
                json_dict['objects'][i]['keypoints'][index][1] *= (
                    ori_img_rgb.shape[0] / cur_img_rgb.shape[0])

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(),
                                                    json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Save Path: {}'.format(json_path))
        with open(json_path, 'w') as save_stream:
            save_stream.write(json.dumps(json_dict))

    def __get_info_tree(self, image_raw, subset, candidate):
        json_dict = dict()
        height, width, _ = image_raw.shape
        json_dict['image_height'] = height
        json_dict['image_width'] = width
        object_list = list()
        for n in range(len(subset)):
            if subset[n][-1] <= 1:
                continue

            object_dict = dict()
            object_dict['keypoints'] = np.zeros(
                (self.configer.get('data', 'num_keypoints'), 3)).tolist()
            for j in range(self.configer.get('data', 'num_keypoints')):
                index = subset[n][j]
                if index == -1:
                    object_dict['keypoints'][j][0] = -1
                    object_dict['keypoints'][j][1] = -1
                    object_dict['keypoints'][j][2] = -1

                else:
                    object_dict['keypoints'][j][0] = candidate[index.astype(
                        int)][0]
                    object_dict['keypoints'][j][1] = candidate[index.astype(
                        int)][1]
                    object_dict['keypoints'][j][2] = 1

            object_dict['score'] = subset[n][-2]
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def __get_paf_and_heatmap(self, img_raw):
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] /
            img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]

        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('data', 'num_keypoints')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                            self.configer.get('network', 'paf_out')))
        partmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0),
                                  fx=scale,
                                  fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            img_test_pad, pad = PadImage(self.configer.get(
                'network', 'stride'))(img_test)
            pad_right = pad[2]
            pad_down = pad[3]
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                paf_out_list, partmap_out_list = self.pose_net(img_test_pad)

            paf_out = paf_out_list[-1]
            partmap_out = partmap_out_list[-1]
            partmap = partmap_out.data.squeeze().cpu().numpy().transpose(
                1, 2, 0)
            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            # self.pose_visualizer.vis_tensor(heatmap_out)
            heatmap = np.zeros((partmap.shape[0], partmap.shape[1],
                                self.configer.get('data', 'num_keypoints')))
            part_num = np.zeros((self.configer.get('data', 'num_keypoints'), ))

            for index in range(len(self.configer.get('details', 'limb_seq'))):
                a = self.configer.get('details', 'limb_seq')[index][0] - 1
                b = self.configer.get('details', 'limb_seq')[index][1] - 1
                heatmap_a = partmap[:, :, index * 4:index * 4 + 2]**2
                heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                heatmap[:, :, a] = (heatmap[:, :, a] * part_num[a] +
                                    heatmap_a) / (part_num[a] + 1)
                part_num[a] += 1
                heatmap_b = partmap[:, :, index * 4 + 2:index * 4 + 4]**2
                heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                heatmap[:, :, b] = (heatmap[:, :, b] * part_num[b] +
                                    heatmap_b) / (part_num[b] + 1)
                part_num[b] += 1

            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) - pad_right, :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            partmap = cv2.resize(partmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            partmap = partmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) - pad_right, :]
            partmap = cv2.resize(partmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            paf = cv2.resize(paf, (0, 0),
                             fx=self.configer.get('network', 'stride'),
                             fy=self.configer.get('network', 'stride'),
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad_down, :img_test_pad.size(3) -
                      pad_right, :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

            partmap_avg = partmap_avg + partmap / len(multiplier)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return paf_avg, heatmap_avg, partmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        all_peaks = []
        peak_counter = 0

        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right, map_gau >= map_up,
                 map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(
                np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [
                peaks_with_score[i] + (ids[i], ) for i in range(len(ids))
            ]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, partmap_avg, all_peaks):
        connection_all = []
        special_k = []
        mid_num = self.configer.get('vis', 'mid_point_num')

        for k in range(len(self.configer.get('details', 'limb_seq'))):
            score_mid = paf_avg[:, :, [k * 2, k * 2 + 1]]
            # self.pose_visualizer.vis_paf(score_mid, img_raw, name='pa{}'.format(k))
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] -
                              1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] -
                              1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec_a = partmap_avg[candA[i][1], candA[i][0],
                                            k * 4:k * 4 + 2]
                        vec_b = -partmap_avg[candB[j][1], candB[j][0],
                                             k * 4 + 2:k * 4 + 4]
                        norm_a = math.sqrt(vec_a[0] * vec_a[0] +
                                           vec_a[1] * vec_a[1]) + 1e-9
                        vec_a = np.divide(vec_a, norm_a)
                        norm_b = math.sqrt(vec_b[0] * vec_b[0] +
                                           vec_b[1] * vec_b[1]) + 1e-9
                        vec_b = np.divide(vec_b, norm_b)

                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        sim_length = np.sum(vec_a * vec + vec_b * vec) / 2.0
                        norm = math.sqrt(vec[0] * vec[0] +
                                         vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)

                        startend = zip(
                            np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)

                        vec_x = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 0]
                            for I in range(len(startend))
                        ])
                        vec_y = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 1]
                            for I in range(len(startend))
                        ])

                        score_midpts = np.multiply(
                            vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts)
                        score_with_dist_prior += min(
                            0.5 * img_raw.shape[0] / norm - 1, 0)

                        num_positive = len(
                            np.nonzero(score_midpts > self.configer.get(
                                'vis', 'limb_threshold'))[0])
                        criterion1 = num_positive > int(
                            self.configer.get('vis', 'limb_pos_ratio') *
                            len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2 and sim_length > self.configer.get(
                                'vis', 'sim_length'):
                            connection_candidate.append([
                                i, j, score_with_dist_prior,
                                score_with_dist_prior + candA[i][2] +
                                candB[j][2]
                            ])

                connection_candidate = sorted(connection_candidate,
                                              key=lambda x: x[2],
                                              reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack(
                            [connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones(
            (0, self.configer.get('data', 'num_keypoints') + 2))
        candidate = np.array(
            [item for sublist in all_peaks for item in sublist])

        for k in self.configer.get('details', 'mini_tree'):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(
                    self.configer.get('details', 'limb_seq')[k]) - 1

                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][
                                indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) +
                                      (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]

                    # if find no partA in the subset, create a new subset
                    elif not found:
                        row = -1 * np.ones(
                            self.configer.get('data', 'num_keypoints') + 2)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(
                            candidate[connection_all[k][i, :2].astype(int),
                                      2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def test(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            if not os.path.exists(os.path.dirname(json_path)):
                os.makedirs(os.path.dirname(json_path))

            if not os.path.exists(os.path.dirname(raw_path)):
                os.makedirs(os.path.dirname(raw_path))

            if not os.path.exists(os.path.dirname(vis_path)):
                os.makedirs(os.path.dirname(vis_path))

            self.__test_img(test_img, json_path, raw_path, vis_path)

        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                if not os.path.exists(os.path.dirname(json_path)):
                    os.makedirs(os.path.dirname(json_path))

                if not os.path.exists(os.path.dirname(raw_path)):
                    os.makedirs(os.path.dirname(raw_path))

                if not os.path.exists(os.path.dirname(vis_path)):
                    os.makedirs(os.path.dirname(vis_path))

                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')

        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.pose_data_loader.get_valloader()

        count = 0
        for i, (inputs, partmap, maskmap,
                vecmap) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 2:
                    exit(1)

                Log.info(partmap.size())
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                partmap_avg = partmap[j].numpy().transpose(1, 2, 0)

                heatmap_avg = np.zeros(
                    (partmap_avg.shape[0], partmap_avg.shape[1],
                     self.configer.get('data', 'num_keypoints')))
                part_num = np.zeros((self.configer.get('data',
                                                       'num_keypoints'), ))

                for index in range(
                        len(self.configer.get('details', 'limb_seq'))):
                    a = self.configer.get('details', 'limb_seq')[index][0] - 1
                    b = self.configer.get('details', 'limb_seq')[index][1] - 1
                    heatmap_a = partmap_avg[:, :, index * 4:index * 4 + 2]**2
                    heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                    heatmap_avg[:, :,
                                a] = (heatmap_avg[:, :, a] * part_num[a] +
                                      heatmap_a) / (part_num[a] + 1)
                    part_num[a] += 1
                    heatmap_b = partmap_avg[:, :,
                                            index * 4 + 2:index * 4 + 4]**2
                    heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                    heatmap_avg[:, :,
                                b] = (heatmap_avg[:, :, b] * part_num[b] +
                                      heatmap_b) / (part_num[b] + 1)
                    part_num[b] += 1

                partmap_avg = cv2.resize(
                    partmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                heatmap_avg = cv2.resize(
                    heatmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0),
                                     fx=self.configer.get('network', 'stride'),
                                     fy=self.configer.get('network', 'stride'),
                                     interpolation=cv2.INTER_CUBIC)

                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(
                    image_bgr, paf_avg, partmap_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all,
                                                       special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(
                    image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(
                    image_canvas, json_dict)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()

    def __list_dir(self, dir_name):
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))

            else:
                filename_list.append(item)

        return filename_list
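
A minimal usage sketch for the CapsulePoseTest class above. How the configer object is constructed depends on the surrounding project and is only stubbed here; the class expects it to expose get() with the keys read in the methods above.

# Hypothetical driver for CapsulePoseTest; the configer construction is an
# assumption and is normally handled by the project's entry script.
configer = ...                   # Configer-style object with 'gpu', 'data', 'network', ... sections
tester = CapsulePoseTest(configer)
tester.init_model()              # builds the multi-pose detector and loads the checkpoint
tester.test()                    # reads test_img / test_dir from the config and writes json, raw and vis files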
Example #11
class OpenPoseTest(object):
    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = DataLoader(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

        self._init_model()

    def _init_model(self):
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = RunnerHelper.load_net(self, self.pose_net)
        self.pose_net.eval()

    def _get_blob(self, ori_image, scale=None):
        assert scale is not None
        image = self.blob_helper.make_input(image=ori_image, scale=scale)

        b, c, h, w = image.size()
        border_hw = [h, w]
        if self.configer.exists('test', 'fit_stride'):
            stride = self.configer.get('test', 'fit_stride')

            pad_w = 0 if (w % stride == 0) else stride - (w % stride)  # right
            pad_h = 0 if (h % stride == 0) else stride - (h % stride)  # down

            expand_image = torch.zeros((b, c, h + pad_h, w + pad_w)).to(image.device)
            expand_image[:, :, 0:h, 0:w] = image
            image = expand_image

        return image, border_hw

    def __test_img(self, image_path, json_path, raw_path, vis_path):

        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))

        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [scale * self.configer.get('test', 'input_size')[1] / ori_height
                      for scale in self.configer.get('test', 'scale_search')]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            image, border_hw = self._get_blob(ori_image, scale=scale)
            with torch.no_grad():
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)

                heatmap = cv2.resize(heatmap, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                heatmap = cv2.resize(heatmap[:border_hw[0], :border_hw[1]],
                                     (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, None, fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
                paf = cv2.resize(paf[:border_hw[0], :border_hw[1]],
                                 (ori_width, ori_height), interpolation=cv2.INTER_CUBIC)

                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(), json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)

        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)

    def __get_info_tree(self, image_raw, subset, candidate):
        json_dict = dict()
        height, width, _ = image_raw.shape
        json_dict['image_height'] = height
        json_dict['image_width'] = width
        object_list = list()
        for n in range(len(subset)):
            if subset[n][-1] < self.configer.get('res', 'num_threshold'):
                continue

            if subset[n][-2] / subset[n][-1] < self.configer.get('res', 'avg_threshold'):
                continue

            object_dict = dict()
            object_dict['kpts'] = np.zeros((self.configer.get('data', 'num_kpts'), 3)).tolist()
            for j in range(self.configer.get('data', 'num_kpts')):
                index = subset[n][j]
                if index == -1:
                    object_dict['kpts'][j][0] = -1
                    object_dict['kpts'][j][1] = -1
                    object_dict['kpts'][j][2] = -1

                else:
                    object_dict['kpts'][j][0] = candidate[index.astype(int)][0]
                    object_dict['kpts'][j][1] = candidate[index.astype(int)][1]
                    object_dict['kpts'][j][2] = 1

            object_dict['score'] = subset[n][-2]
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def __extract_heatmap_info(self, heatmap_avg):
        all_peaks = []
        peak_counter = 0

        for part in range(self.configer.get('data', 'num_kpts')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right, map_gau >= map_up,
                 map_gau >= map_down, map_gau > self.configer.get('res', 'part_threshold')))

            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)

            '''
            del_flag = [0 for i in range(len(peaks))]
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    for j in range(i+1, len(peaks)):
                        if max(abs(peaks[i][0] - peaks[j][0]), abs(peaks[i][1] - peaks[j][1])) <= 6:
                            del_flag[j] = 1

            new_peaks = list()
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    new_peaks.append(peaks[i])

            peaks = new_peaks
            '''

            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (ids[i],) for i in range(len(ids))]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
        connection_all = []
        special_k = []
        mid_num = self.configer.get('res', 'mid_point_num')

        for k in range(len(self.configer.get('details', 'limb_seq'))):
            score_mid = paf_avg[:, :, [k*2, k*2+1]]
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] - 1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)

                        startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                       np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)

                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0]
                                          for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1]
                                          for I in range(len(startend))])

                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts)
                        score_with_dist_prior += min(0.5 * img_raw.shape[0] / norm - 1, 0)

                        num_positive = len(np.nonzero(score_midpts > self.configer.get('res', 'limb_threshold'))[0])
                        criterion1 = num_positive > int(self.configer.get('res', 'limb_pos_ratio') * len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, self.configer.get('data', 'num_kpts') + 2))
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in self.configer.get('details', 'mini_tree'):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(self.configer.get('details', 'limb_seq')[k]) - 1

                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                    # if find no partA in the subset, create a new subset
                    elif not found:
                        row = -1 * np.ones(self.configer.get('data', 'num_kpts') + 2)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def debug(self, vis_dir):
        count = 0
        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
            inputs = data_dict['img']
            maskmap = data_dict['maskmap']
            heatmap = data_dict['heatmap']
            vecmap = data_dict['vecmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 10:
                    exit(1)

                Log.info(heatmap.size())
                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                mask_canvas = maskmap[j].repeat(3, 1, 1).numpy().transpose(1, 2, 0)
                mask_canvas = (mask_canvas * 255).astype(np.uint8)
                mask_canvas = cv2.resize(mask_canvas, (0, 0), fx=self.configer.get('network', 'stride'),
                                         fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)

                image_bgr = cv2.addWeighted(image_bgr, 0.6, mask_canvas, 0.4, 0)
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0), fx=self.configer.get('network', 'stride'),
                                     fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0), fx=self.configer.get('network', 'stride'),
                                     fy=self.configer.get('network', 'stride'), interpolation=cv2.INTER_CUBIC)
                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(image_bgr, paf_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
                cv2.imwrite(os.path.join(vis_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
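
Both test classes locate keypoint candidates in __extract_heatmap_info with a simple non-maximum suppression: each channel of the averaged heatmap is Gaussian-smoothed, and a pixel is kept as a peak when it is at least as large as its four neighbours and above a confidence threshold. The same idea in isolation, as a stand-alone sketch (the 0.1 threshold is an arbitrary stand-in for the configured part_threshold):

import numpy as np
from scipy.ndimage import gaussian_filter

# Stand-alone sketch of the 4-neighbour peak test used in __extract_heatmap_info.
def find_peaks(heatmap, threshold=0.1, sigma=3):
    smoothed = gaussian_filter(heatmap, sigma=sigma)
    shift_down = np.zeros_like(smoothed)
    shift_down[1:, :] = smoothed[:-1, :]
    shift_up = np.zeros_like(smoothed)
    shift_up[:-1, :] = smoothed[1:, :]
    shift_right = np.zeros_like(smoothed)
    shift_right[:, 1:] = smoothed[:, :-1]
    shift_left = np.zeros_like(smoothed)
    shift_left[:, :-1] = smoothed[:, 1:]
    peaks_binary = np.logical_and.reduce((smoothed >= shift_down, smoothed >= shift_up,
                                          smoothed >= shift_right, smoothed >= shift_left,
                                          smoothed > threshold))
    ys, xs = np.nonzero(peaks_binary)
    # return (x, y, score) triples, mirroring the (x, y) order used above
    return [(int(x), int(y), float(heatmap[y, x])) for x, y in zip(xs, ys)]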
Example #12
class ConvPoseMachine(object):
    """
      The class for Pose Estimation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.train_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.best_model_loss = None
        self.is_best = None
        self.lr = None
        self.iters = None

    def init_model(self, train_loader=None, val_loader=None):
        self.pose_net = self.model_manager.pose_detector()

        self.pose_net, self.iters = self.train_utilizer.load_net(self.pose_net)

        self.optimizer = self.train_utilizer.update_optimizer(self.pose_net, self.iters)

        self.train_loader = train_loader
        self.val_loader = val_loader

        self.heatmap_loss = self.loss_manager.get_pose_loss('heatmap_loss')

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)

            inputs = Variable(data_tuple[0].cuda(async=True))
            heatmap = Variable(data_tuple[1].cuda(async=True))
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(async=True))

            self.pose_visualizer.vis_tensor(heatmap, name='heatmap')
            self.pose_visualizer.vis_tensor((inputs*256+128)/255, name='image')
            # Forward pass.
            outputs = self.pose_net(inputs)

            self.pose_visualizer.vis_tensor(outputs, name='output')
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap

            self.train_losses.update(loss.data[0], inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:.3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                         self.iters, self.configer.get('solver', 'display_iter'), self.lr, batch_time=self.batch_time,
                         data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            self.optimizer = self.train_utilizer.update_optimizer(self.pose_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        for j, data_tuple in enumerate(self.val_loader):
            # Change the data type.
            inputs = Variable(data_tuple[0].cuda(async=True), volatile=True)
            heatmap = Variable(data_tuple[1].cuda(async=True), volatile=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(async=True), volatile=True)

            # Forward pass.
            outputs = self.pose_net(inputs)
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak_val')
            # Compute the loss of the val batch.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap

            self.val_losses.update(loss.data[0], inputs.size(0))

            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break

    def test(self, img_path=None, img_dir=None):
        if img_path is not None and os.path.exists(img_path):
            image = Image.open(img_path).convert('RGB')
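
A hedged sketch of how the ConvPoseMachine trainer above might be wired up; building the data loaders is left to the surrounding project and only stubbed here, since the class merely receives them in init_model():

# Hypothetical wiring for the ConvPoseMachine trainer above; the loader
# construction is an assumption, not part of the original snippet.
configer = ...                   # Configer-style object built by the entry script
machine = ConvPoseMachine(configer)
train_loader = ...               # e.g. a torch.utils.data.DataLoader over the training set
val_loader = ...                 # optional; __val() only runs when this is not None
machine.init_model(train_loader=train_loader, val_loader=val_loader)
machine.train()                  # loops __train() until solver.max_iter is reached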
Example #13
class OpenPose(object):
    """
      The class for Pose Estimation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        self.pose_net = self.model_manager.pose_detector()
        self.iters = 0

        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)

        self.optimizer, self.lr = self.module_utilizer.update_optimizer(
            self.pose_net, self.iters)

        if self.configer.get('dataset') == 'coco':
            self.train_loader = self.data_loader.get_trainloader(OPCocoLoader)
            self.val_loader = self.data_loader.get_valloader(OPCocoLoader)

        else:
            Log.error('Dataset: {} is not valid!'.format(
                self.configer.get('dataset')))
            exit(1)

        self.mse_loss = self.loss_manager.get_pose_loss('mse_loss')

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)

            inputs = Variable(data_tuple[0].cuda(async=True))
            heatmap = Variable(data_tuple[1].cuda(async=True))
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(async=True))

            # Forward pass.
            paf_out, heatmap_out = self.pose_net(inputs)
            self.vis.vis_paf(paf_out,
                             inputs.data.cpu().squeeze().numpy().transpose(
                                 1, 2, 0),
                             name='paf_out')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
            loss = loss_heatmap
            if len(data_tuple) > 3:
                vecmap = Variable(data_tuple[3].cuda(async=True))
                self.vis.vis_paf(vecmap,
                                 inputs.data.cpu().squeeze().numpy().transpose(
                                     1, 2, 0),
                                 name='paf')
                loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                loss += loss_associate

            self.train_losses.update(loss.data[0], inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        self.iters,
                        self.configer.get('solver', 'display_iter'),
                        self.lr,
                        batch_time=self.batch_time,
                        data_time=self.data_time,
                        loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            # Adjust the learning rate after every iteration.
            self.optimizer, self.lr = self.module_utilizer.update_optimizer(
                self.pose_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Move the batch to the GPU; gradients are not needed for validation.
                inputs = data_tuple[0].cuda(non_blocking=True)
                heatmap = data_tuple[1].cuda(non_blocking=True)
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = data_tuple[2].cuda(non_blocking=True)

                # Forward pass.
                paf_out, heatmap_out = self.pose_net(inputs)
                # Compute the loss of the val batch.
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss = loss_heatmap

                if len(data_tuple) > 3:
                    vecmap = data_tuple[3].cuda(non_blocking=True)
                    loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                    loss = loss_heatmap + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, self.iters)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
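        # cudnn.benchmark lets cuDNN auto-tune its convolution kernels, which
        # pays off when the input resolution stays fixed across iterations.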
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
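
The mse_loss criterion above is obtained from PoseLossManager.get_pose_loss('mse_loss') and is always called with an optional mask, so that ignored regions (for example unannotated crowd areas in COCO) do not contribute to the heatmap or PAF loss. The manager's real implementation is not part of this snippet; a minimal sketch of a mask-aware MSE criterion with the same call signature might look like the following (the name MaskedMSELoss is illustrative):

import torch.nn as nn
import torch.nn.functional as F


class MaskedMSELoss(nn.Module):
    """Mean-squared error that zeroes out masked-away regions before comparing."""

    def forward(self, pred, target, mask=None):
        # pred / target: (N, C, H, W); mask, if given, broadcasts over the channels.
        if mask is not None:
            pred = pred * mask
            target = target * mask
        return F.mse_loss(pred, target)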
Пример #14
0
class ConvPoseMachine(object):
    """
      The class for Pose Estimation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.train_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.best_model_loss = None
        self.is_best = None
        self.lr = None
        self.iters = None

    def init_model(self, train_loader=None, val_loader=None):
        self.pose_net = self.model_manager.pose_detector()

        self.pose_net, self.iters = self.train_utilizer.load_net(self.pose_net)

        self.optimizer = self.train_utilizer.update_optimizer(
            self.pose_net, self.iters)

        self.train_loader = train_loader
        self.val_loader = val_loader

        self.heatmap_loss = self.loss_manager.get_pose_loss('heatmap_loss')
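        # Unlike the OpenPose trainers above and below, CPM is supervised with
        # heatmaps only; there is no PAF / association branch.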

    def __train(self):
        """
          Run one epoch of training over the train loader.
        """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
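        # Only inputs, heatmap and maskmap are consumed below; tagmap and
        # num_objects are left untouched by this trainer.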
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Move the batch to the GPU; the mask map is optional.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(1)

            inputs = data_tuple[0].cuda(non_blocking=True)
            heatmap = data_tuple[1].cuda(non_blocking=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = data_tuple[2].cuda(non_blocking=True)

            self.pose_visualizer.vis_tensor(heatmap, name='heatmap')
            self.pose_visualizer.vis_tensor((inputs * 256 + 128) / 255,
                                            name='image')
            # Forward pass.
            outputs = self.pose_net(inputs)

            self.pose_visualizer.vis_tensor(outputs, name='output')
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap

            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1} iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1} iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        self.iters,
                        self.configer.get('solver', 'display_iter'),
                        self.lr,
                        batch_time=self.batch_time,
                        data_time=self.data_time,
                        loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            self.optimizer = self.train_utilizer.update_optimizer(
                self.pose_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Move the batch to the GPU; gradients are not needed for validation.
                inputs = data_tuple[0].cuda(non_blocking=True)
                heatmap = data_tuple[1].cuda(non_blocking=True)
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = data_tuple[2].cuda(non_blocking=True)

                # Forward pass.
                outputs = self.pose_net(inputs)
                self.pose_visualizer.vis_peaks(inputs, outputs, name='peak_val')
                # Compute the loss of the val batch.
                loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
                loss = loss_heatmap

                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break

    def test(self, img_path=None, img_dir=None):
        if img_path is not None and os.path.exists(img_path):
            image = Image.open(img_path).convert('RGB')
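
All of these trainers keep their timing and loss statistics in AverageMeter objects, touching only update(value, n), reset() and the val / avg / sum attributes. The meter class itself is not shown in the snippets; a minimal implementation matching that interface (essentially the helper popularised by the official PyTorch ImageNet example) would be:

class AverageMeter(object):
    """Tracks the latest value and a running average of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0    # last value passed to update()
        self.sum = 0.0    # weighted sum of all values
        self.count = 0    # total weight (e.g. number of samples)
        self.avg = 0.0    # running average, sum / count

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count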
Пример #15
0
class OpenPose(object):
    """
      The class for Pose Estimation. Includes train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        self.pose_net = self.model_manager.pose_detector()
        self.iters = 0

        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)

        self.optimizer, self.lr = self.module_utilizer.update_optimizer(self.pose_net, self.iters)

        if self.configer.get('dataset') == 'coco':
            self.train_loader = self.data_loader.get_trainloader(OPCocoLoader)
            self.val_loader = self.data_loader.get_valloader(OPCocoLoader)

        else:
            Log.error('Dataset: {} is not valid!'.format(self.configer.get('dataset')))
            exit(1)

        self.mse_loss = self.loss_manager.get_pose_loss('mse_loss')

    def __train(self):
        """
          Run one epoch of training over the train loader.
        """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Move the batch to the GPU; the mask map is optional.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(1)

            inputs = data_tuple[0].cuda(non_blocking=True)
            heatmap = data_tuple[1].cuda(non_blocking=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = data_tuple[2].cuda(non_blocking=True)

            # Forward pass.
            paf_out, heatmap_out = self.pose_net(inputs)
            self.vis.vis_paf(paf_out, inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0), name='paf_out')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
            loss = loss_heatmap
            if len(data_tuple) > 3:
                vecmap = data_tuple[3].cuda(non_blocking=True)
                self.vis.vis_paf(vecmap, inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0), name='paf')
                loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                loss += loss_associate

            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1} iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1} iters, ({data_time.avg:.3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                         self.iters, self.configer.get('solver', 'display_iter'), self.lr, batch_time=self.batch_time,
                         data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            # Adjust the learning rate after every iteration.
            self.optimizer, self.lr = self.module_utilizer.update_optimizer(self.pose_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Move the batch to the GPU; gradients are not needed for validation.
                inputs = data_tuple[0].cuda(non_blocking=True)
                heatmap = data_tuple[1].cuda(non_blocking=True)
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = data_tuple[2].cuda(non_blocking=True)

                # Forward pass.
                paf_out, heatmap_out = self.pose_net(inputs)
                # Compute the loss of the val batch.
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss = loss_heatmap

                if len(data_tuple) > 3:
                    vecmap = data_tuple[3].cuda(non_blocking=True)
                    loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                    loss = loss_heatmap + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, self.iters)

        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(
            batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
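
Driving one of these trainers then amounts to constructing it with a configer that provides the 'dataset' entry and the 'solver' values read above (display_iter, test_interval, max_iter), initialising the model and calling train(). A hypothetical driver, assuming such a configer has already been built elsewhere in the project:

# Hypothetical usage sketch; `configer` is assumed to be built elsewhere with the
# 'dataset' and 'solver' entries that OpenPose reads above.
pose_trainer = OpenPose(configer)
pose_trainer.init_model()  # builds the network, optimizer and the COCO loaders
pose_trainer.train()       # trains until solver.max_iter, running __val and
                           # saving a checkpoint every solver.test_interval iterations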