def get_batch(self):
    """Load the next mini-batch; in train mode, derive RPN anchor targets.

    Test mode delegates everything to minibatch.get_minibatch. Train mode
    splits the batch across devices, pads images to a common size, infers
    the feature-map shape per image, and assigns anchors as labels.
    Results land in self.data / self.label as lists of mx.nd.array.
    """
    start = self.cur
    stop = min(start + self.batch_size, self.size)
    roidb = [self.roidb[self.index[idx]] for idx in range(start, stop)]
    if self.mode == 'test':
        self.data, self.label = minibatch.get_minibatch(roidb, self.num_classes, self.mode)
        return
    # Distribute the batch across devices according to the work load list.
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)

    data_list = []
    label_list = []
    for slc in slices:
        slice_roidb = [roidb[i] for i in range(slc.start, slc.stop)]
        data, label = minibatch.get_minibatch(slice_roidb, self.num_classes, self.mode)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    padded = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, padded):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # Infer the conv feature shape from the (padded) input shapes;
        # 'im_info' is not a network input, so drop it before inference.
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # Replace the raw gt_boxes label with anchor-assignment targets.
        label = minibatch.assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                                        self.feat_stride, self.anchor_scales,
                                        self.anchor_ratios, self.allowed_border)
        del data['im_info']
        new_label_list.append(label)

    all_data = {'data': tensor_vstack([batch['data'] for batch in data_list])}

    all_label = dict()
    # labels are padded with -1 (ignore); bbox targets/weights pad with 0
    all_label['label'] = tensor_vstack([batch['label'] for batch in new_label_list], pad=-1)
    for key in ('bbox_target', 'bbox_inside_weight', 'bbox_outside_weight'):
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list])

    self.data = [mx.nd.array(all_data['data'])]
    self.label = [mx.nd.array(all_label[k])
                  for k in ('label', 'bbox_target', 'bbox_inside_weight', 'bbox_outside_weight')]
def _get_train_batch(self):
    """
    Utilize minibatch sampling, e.g. 2 images and 64 rois per image.

    Splits the batch over devices per work_load_list and wraps around the
    end of the roidb so the batch is always full.
    :return: training batch dict (e.g. 128 samples), values stacked with
        tensor_vstack across the per-device minibatches
    """
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)

    cur_from = self.cur
    cur_to = cur_from + self.batch_size
    if cur_to <= self.size:
        roidb = [self.roidb[i] for i in range(cur_from, cur_to)]
    else:
        # wrap around: pad the tail of the epoch with images from the start
        pad = cur_to - self.size
        roidb = self.roidb[cur_from:] + self.roidb[:pad]

    batch_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        # NOTE(review): third argument is self.ctx here, while the sibling
        # loaders pass self.mode — confirm against minibatch.get_minibatch's
        # signature in this module.
        batch = minibatch.get_minibatch(iroidb, self.num_classes, self.ctx)
        batch_list.append(batch)

    all_batch = dict()
    for key in batch_list[0].keys():
        all_batch[key] = tensor_vstack([batch[key] for batch in batch_list])
    return all_batch
def get_image_array(roidb, scales, scale_indexes, need_mean=True):
    """
    Build image array from specific roidb.

    :param roidb: images to be processed; each entry provides an 'image'
        path and a 'flipped' flag
    :param scales: scale list
    :param scale_indexes: indexes into scales, one per image
    :param need_mean: if True, subtract config.PIXEL_MEANS in transform
    :return: array [b, c, h, w], list of scales
    """
    processed_ims = []
    im_scales = []
    for i in range(len(roidb)):
        im = cv2.imread(roidb[i]['image'])
        if roidb[i]['flipped']:
            # horizontal flip: reverse the width axis
            im = im[:, ::-1, :]
        target_size = scales[scale_indexes[i]]
        im, im_scale = image_processing.resize(im, target_size, config.MAX_SIZE)
        im_tensor = image_processing.transform(im, config.PIXEL_MEANS, need_mean=need_mean)
        processed_ims.append(im_tensor)
        im_scales.append(im_scale)
    array = image_processing.tensor_vstack(processed_ims)
    return array, im_scales
def get_batch(self):
    """Fetch the next mini-batch of images/rois plus labels into self.data/self.label."""
    start = self.cur
    stop = min(start + self.batch_size, self.size)
    roidb = [self.roidb[self.index[pos]] for pos in range(start, stop)]
    if self.mode == 'test':
        self.data, self.label = minibatch.get_minibatch(roidb, self.num_classes, self.mode)
    else:
        # split the batch across devices according to the work load list
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        data_list = []
        label_list = []
        for slc in slices:
            sub_roidb = [roidb[i] for i in range(slc.start, slc.stop)]
            data, label = minibatch.get_minibatch(sub_roidb, self.num_classes, self.mode)
            data_list.append(data)
            label_list.append(label)

        # stack the per-device tensors into one batch per key
        all_data = {key: tensor_vstack([batch[key] for batch in data_list])
                    for key in data_list[0].keys()}
        all_label = {key: tensor_vstack([batch[key] for batch in label_list])
                     for key in label_list[0].keys()}

        self.data = [mx.nd.array(all_data['data']), mx.nd.array(all_data['rois'])]
        self.label = [mx.nd.array(all_label[key])
                      for key in ('label', 'bbox_target',
                                  'bbox_inside_weight', 'bbox_outside_weight')]
def get_batch(self):
    """Populate self.data and self.label with the next mini-batch."""
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    if self.mode == 'test':
        self.data, self.label = minibatch.get_minibatch(roidb, self.num_classes, self.mode)
        return
    # training path: shard the batch over devices by work load
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "

    data_list = []
    label_list = []
    for islice in _split_input_slice(self.batch_size, work_load_list):
        shard = [roidb[i] for i in range(islice.start, islice.stop)]
        shard_data, shard_label = minibatch.get_minibatch(shard, self.num_classes, self.mode)
        data_list.append(shard_data)
        label_list.append(shard_label)

    # merge the per-device shards back into full-batch tensors
    all_data = dict()
    for key in data_list[0].keys():
        all_data[key] = tensor_vstack([shard[key] for shard in data_list])
    all_label = dict()
    for key in label_list[0].keys():
        all_label[key] = tensor_vstack([shard[key] for shard in label_list])

    self.data = [mx.nd.array(all_data['data']),
                 mx.nd.array(all_data['rois'])]
    self.label = [mx.nd.array(all_label['label']),
                  mx.nd.array(all_label['bbox_target']),
                  mx.nd.array(all_label['bbox_inside_weight']),
                  mx.nd.array(all_label['bbox_outside_weight'])]
def get_image_array(roidb, scales, scale_indexes):
    """
    Build image array from specific roidb.

    :param roidb: images to be processed
    :param scales: scale list
    :param scale_indexes: indexes
    :return: array [b, c, h, w], list of scales
    """
    processed_ims = []
    im_scales = []
    for i, entry in enumerate(roidb):
        im = cv2.imread(entry['image'])
        if entry['flipped']:
            # horizontal flip: reverse the width axis
            im = im[:, ::-1, :]
        im, im_scale = image_processing.resize(im, scales[scale_indexes[i]], config.MAX_SIZE)
        processed_ims.append(image_processing.transform(im, config.PIXEL_MEANS))
        im_scales.append(im_scale)
    return image_processing.tensor_vstack(processed_ims), im_scales
def get_batch(self):
    """Prepare the next batch; in train mode, build anchor targets from gt boxes."""
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    if self.mode == 'test':
        self.data, self.label = minibatch.get_minibatch(roidb, self.num_classes, self.mode)
    else:
        # per-device work split
        wll = self.work_load_list
        if wll is None:
            wll = [1] * len(self.ctx)
        assert isinstance(wll, list) and len(wll) == len(self.ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, wll)

        data_list, label_list = [], []
        for slc in slices:
            per_dev_roidb = [roidb[i] for i in range(slc.start, slc.stop)]
            data, label = minibatch.get_minibatch(per_dev_roidb, self.num_classes, self.mode)
            data_list.append(data)
            label_list.append(label)

        # pad images to a common size first, then derive anchor labels
        padded = tensor_vstack([d['data'] for d in data_list])
        for d, pad_im in zip(data_list, padded):
            d['data'] = pad_im[np.newaxis, :]

        new_label_list = []
        for d, lbl in zip(data_list, label_list):
            # infer the conv feature shape for this padded input;
            # 'im_info' is not a network input, so exclude it
            shape_dict = {k: v.shape for k, v in d.items()}
            del shape_dict['im_info']
            _, out_shapes, _ = self.feat_sym.infer_shape(**shape_dict)
            feat_shape = [int(x) for x in out_shapes[0]]
            lbl = minibatch.assign_anchor(feat_shape, lbl['gt_boxes'], d['im_info'],
                                          self.feat_stride, self.anchor_scales,
                                          self.anchor_ratios, self.allowed_border)
            del d['im_info']
            new_label_list.append(lbl)

        all_data = {'data': tensor_vstack([d['data'] for d in data_list])}
        # label padded with -1 (ignore); bbox targets/weights pad with 0
        all_label = {'label': tensor_vstack([l['label'] for l in new_label_list], pad=-1)}
        for key in ('bbox_target', 'bbox_inside_weight', 'bbox_outside_weight'):
            all_label[key] = tensor_vstack([l[key] for l in new_label_list])

        self.data = [mx.nd.array(all_data['data'])]
        self.label = [mx.nd.array(all_label['label']),
                      mx.nd.array(all_label['bbox_target']),
                      mx.nd.array(all_label['bbox_inside_weight']),
                      mx.nd.array(all_label['bbox_outside_weight'])]