# Example 1
    def get_batch(self):
        """Load the next mini-batch and populate self.data / self.label.

        Slices the current window of the roidb, splits the work across
        devices, pads images to a common size, assigns per-anchor RPN
        targets, and stacks everything into mx.nd arrays.
        """
        # slice roidb: entries [cur, cur + batch_size) of the shuffled index
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

        # decide multi device slice; default to equal work per context
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        # get testing data for multigpu: one (data, label) dict pair per device
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb)
            data_list.append(data)
            label_list.append(label)

        # pad data first and then assign anchor (read label)
        # NOTE(review): tensor_vstack presumably pads images to a common
        # H/W before stacking — confirm against its definition
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]

        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape; 'im_info' is not a conv input, so drop it
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]

            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
            # keypoint ground truth is forwarded alongside the boxes
            data['gt_keypoints'] = label['gt_keypoints'][np.newaxis, :, :]

            # assign anchor for label: replaces the raw label dict with
            # per-anchor classification/regression targets
            label = assign_anchor(feat_shape, label['gt_boxes'],
                                  data['im_info'], self.feat_stride,
                                  self.anchor_scales, self.anchor_ratios,
                                  self.allowed_border)
            new_label_list.append(label)

        # stack per-device blobs into single batch arrays
        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])

        all_label = dict()
        for key in self.label_name:
            # 'label' is padded with -1; other targets are padded with 0
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack(
                [batch[key] for batch in new_label_list], pad=pad)

        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
# Example 2
    def parfetch(self, roidb):
        """Build one training sample (data blobs + anchor labels) from roidb."""
        # base RPN blobs for this roidb slice
        data, label = get_rpn_batch(roidb)

        # infer the feature-map shape; 'im_info' is not a conv input
        shapes = {name: blob.shape for name, blob in data.items()}
        shapes.pop('im_info')
        _, feat_shape, _ = self.feat_sym.infer_shape(**shapes)
        feat_shape = [int(dim) for dim in feat_shape[0]]

        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

        # add gt_masks to data for e2e (single-image batches only)
        assert len(roidb) == 1
        im_hw = data['im_info'][0, :2].astype('int')
        data['gt_masks'] = get_gt_masks(roidb[0]['cache_seg_inst'], im_hw)

        # replace the raw label dict with per-anchor targets
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)

        return {'data': data, 'label': label}
# Example 3
 def infer_shape(self, max_data_shape=None, max_label_shape=None):
     """ Return maximum data and label shape for single gpu """
     if max_data_shape is None:
         max_data_shape = []
     if max_label_shape is None:
         max_label_shape = []
     max_shapes = dict(max_data_shape + max_label_shape)
     batch_size = max_shapes['data'][0]
     # dummy im_info (height, width, scale) at the maximum image size
     im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]

     # run the anchor assigner with zero gt boxes for every pyramid level,
     # purely to discover the label shapes
     label = {}
     for level in range(len(self.feat_sym)):
         sym = self.feat_sym[level]
         stride = self.feat_stride[level]
         _, feat_shape, _ = sym.infer_shape(**max_shapes)
         targets = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
                                 stride, self.anchor_scales,
                                 self.anchor_ratios, self.allowed_border)
         # each level contributes three consecutive entries of label_name
         for j in range(3):
             label[self.label_name[3 * level + j]] = \
                 targets[self.label_name_simple[j]]

     ordered = [label[name] for name in self.label_name]
     label_shape = [(name, tuple([batch_size] + list(arr.shape[1:])))
                    for name, arr in zip(self.label_name, ordered)]
     return max_data_shape, label_shape
# Example 4
    def get_batch(self):
        """Fetch the next mini-batch and fill self.data / self.label."""
        # take the roidb entries for the current window
        start = self.cur
        stop = min(start + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(start, stop)]

        # work out how the batch is split across devices
        work_load_list = self.work_load_list
        if work_load_list is None:
            work_load_list = [1] * len(self.ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(self.ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        # per-device raw (data, label) dict pairs
        data_list = []
        label_list = []
        for sl in slices:
            chunk = [roidb[i] for i in range(sl.start, sl.stop)]
            data, label = get_rpn_batch(chunk)
            data_list.append(data)
            label_list.append(label)

        # pad images to a common size first, then assign anchors
        padded = tensor_vstack([batch['data'] for batch in data_list])
        for data, image in zip(data_list, padded):
            data['data'] = image[np.newaxis, :]

        new_label_list = []
        for data, label in zip(data_list, label_list):
            # feature-map shape drives the anchor layout; 'im_info' is
            # not a conv input, so it is excluded from shape inference
            shapes = {k: v.shape for k, v in data.items()}
            del shapes['im_info']
            _, feat_shape, _ = self.feat_sym.infer_shape(**shapes)
            feat_shape = [int(dim) for dim in feat_shape[0]]

            # expose gt_boxes to the network for end-to-end training
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

            # turn the raw label into per-anchor targets
            new_label_list.append(
                assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border))

        # stack per-device blobs into the final batch tensors
        all_data = {key: tensor_vstack([batch[key] for batch in data_list])
                    for key in self.data_name}
        all_label = {
            key: tensor_vstack([batch[key] for batch in new_label_list],
                               pad=-1 if key == 'label' else 0)
            for key in self.label_name}

        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
# Example 5
    def _assign_anchor(self, data_shape, gt_boxes, im_info):
        """Compute anchor targets from an inferred feature-map shape.

        NOTE(review): `anchors.append(...)` sits OUTSIDE the for loop, so
        only the feat_shape of the LAST symbol in self.feat_sym is used and
        the returned list always has exactly one entry. Confirm whether the
        append was meant to be inside the loop (one entry per level) or the
        loop over self.feat_sym is vestigial.
        """
        anchors = []
        for fs in self.feat_sym:
            _, feat_shape, _ = fs.infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
        anchors.append(
            assign_anchor(feat_shape, gt_boxes, im_info, self.feat_stride,
                          self.anchor_scales, self.anchor_ratios,
                          self.allowed_border))

        return anchors
# Example 6
 def infer_shape(self, max_data_shape=None, max_label_shape=None):
     """ Return maximum data and label shape for single gpu """
     max_data_shape = [] if max_data_shape is None else max_data_shape
     max_label_shape = [] if max_label_shape is None else max_label_shape
     max_shapes = dict(max_data_shape + max_label_shape)
     batch_size = max_shapes['data'][0]
     # dummy im_info (height, width, scale) at the maximum image size
     im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
     _, feat_shape, _ = self.feat_sym.infer_shape(**max_shapes)
     # assign anchors against an empty gt set purely to get label shapes
     dummy = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
                           self.feat_stride, self.anchor_scales,
                           self.anchor_ratios, self.allowed_border)
     label_shape = [
         (name, tuple([batch_size] + list(dummy[name].shape[1:])))
         for name in self.label_name
     ]
     return max_data_shape, label_shape
# Example 7
 def infer_shape(self, max_data_shape=None, max_label_shape=None):
     """ Return maximum data and label shape for single gpu """
     if max_data_shape is None:
         max_data_shape = []
     if max_label_shape is None:
         max_label_shape = []
     shape_dict = dict(max_data_shape + max_label_shape)
     n = shape_dict['data'][0]
     h, w = shape_dict['data'][2], shape_dict['data'][3]
     # fake im_info at the maximum resolution, scale 1.0
     im_info = [[h, w, 1.0]]
     _, feat_shape, _ = self.feat_sym.infer_shape(**shape_dict)
     # run the anchor assigner with zero gt boxes just to learn the shapes
     targets = assign_anchor(feat_shape[0], np.zeros((0, 5)), im_info,
                             self.feat_stride, self.anchor_scales,
                             self.anchor_ratios, self.allowed_border)
     label_shape = []
     for name in self.label_name:
         per_sample = list(targets[name].shape[1:])
         label_shape.append((name, tuple([n] + per_sample)))
     return max_data_shape, label_shape