def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load."
    slices = _split_input_slice(self.batch_size, work_load_list)

    # get training data for multi-GPU
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]

        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.class_num, self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border, self.multi_label)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
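# get_batch above relies on tensor_vstack to merge images of unequal spatial size
# into one blob. For reference only, a minimal sketch of that padding behaviour
# (a hypothetical vstack_with_pad, not the repository's tensor_vstack):
import numpy as np

def vstack_with_pad(tensors, pad=0):
    """Concatenate arrays along axis 0, padding every trailing dimension
    with `pad` up to the largest size found in the list."""
    ndim = tensors[0].ndim
    max_trailing = [max(t.shape[d] for t in tensors) for d in range(1, ndim)]
    total = sum(t.shape[0] for t in tensors)
    out = np.full([total] + max_trailing, pad, dtype=tensors[0].dtype)
    row = 0
    for t in tensors:
        index = (slice(row, row + t.shape[0]),) + tuple(slice(0, s) for s in t.shape[1:])
        out[index] = t
        row += t.shape[0]
    return out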
def parfetch(self, iroidb):
    # get training data for multi-GPU
    data, label = get_rpn_batch(iroidb, self.cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']

    # infer the feature map shape of each pyramid level
    _, feat_shape_p3, _ = self.feat_sym_p3.infer_shape(**data_shape)
    feat_shape_p3 = [int(i) for i in feat_shape_p3[0]]
    _, feat_shape_p4, _ = self.feat_sym_p4.infer_shape(**data_shape)
    feat_shape_p4 = [int(i) for i in feat_shape_p4[0]]
    _, feat_shape_p5, _ = self.feat_sym_p5.infer_shape(**data_shape)
    feat_shape_p5 = [int(i) for i in feat_shape_p5[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # assign anchors for every pyramid level
    label = assign_anchor(feat_shape_p3, feat_shape_p4, feat_shape_p5,
                          label['gt_boxes'], data['im_info'], self.cfg,
                          self.feat_stride_p3, self.anchor_scales_p3, self.anchor_ratios_p3,
                          self.feat_stride_p4, self.anchor_scales_p4, self.anchor_ratios_p4,
                          self.feat_stride_p5, self.anchor_scales_p5, self.anchor_ratios_p5,
                          self.allowed_border)
    return {'data': data, 'label': label}
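# parfetch operates on a single device slice. One plausible driver, sketched here
# purely for illustration (fetch_all_slices, iterator, roidb and slices are assumed
# names mirroring the slicing in get_batch above, not code from the repository),
# fans the slices out over a thread pool:
from multiprocessing.pool import ThreadPool

def fetch_all_slices(iterator, roidb, slices):
    # run parfetch once per device slice and collect the per-slice dicts
    pool = ThreadPool(processes=len(slices))
    jobs = [pool.apply_async(iterator.parfetch,
                             ([roidb[i] for i in range(s.start, s.stop)],))
            for s in slices]
    results = [job.get() for job in jobs]
    pool.close()
    pool.join()
    return results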
def parfetch(self, roidb):
    # get data for multi-GPU
    data, label = get_rpn_batch(roidb, self.cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    # im_info is not needed to infer the RPN feature shape
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # add gt_masks to data for e2e
    assert len(roidb) == 1
    gt_masks = get_gt_masks(roidb[0]['cache_seg_inst'], data['im_info'][0, :2].astype('int'))
    data['gt_masks'] = gt_masks

    # assign anchor for label
    label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                          self.feat_stride, self.anchor_scales,
                          self.anchor_ratios, self.allowed_border)

    # rename the anchor targets to proposal_* keys
    new_label = {'proposal_label': label['label'],
                 'proposal_bbox_target': label['bbox_target'],
                 'proposal_bbox_weight': label['bbox_weight']}
    label = new_label
    return {'data': data, 'label': label}
def parfetch(self, iroidb):
    # get training data for multi-GPU
    if config.TRAIN.END2END and config.TRAIN.E2E_NAME == "base":
        # resnet_rfcn
        data, label = get_rpn_batch(iroidb, self.cfg)
    elif config.TRAIN.END2END and config.TRAIN.E2E_NAME == "off":
        data, label = get_rpn_pair_batch(iroidb, self.cfg)
    else:
        # every remaining configuration uses the triple batch
        data, label = get_rpn_triple_batch(iroidb, self.cfg)

    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # assign anchor for label
    label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                          self.feat_stride, self.anchor_scales, self.anchor_ratios,
                          self.allowed_border, self.normalize_target,
                          self.bbox_mean, self.bbox_std)
    return {'data': data, 'label': label}
def get_batch(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load."
    slices = _split_input_slice(self.batch_size, work_load_list)

    # get training data for multi-GPU
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_list.append(data)
        label_list.append(label)

    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]

    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]

        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

        # assign anchor for label
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride, self.anchor_scales, self.anchor_ratios,
                              self.allowed_border, self.normalize_target,
                              self.bbox_mean, self.bbox_std)
        new_label_list.append(label)

    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])

    all_label = dict()
    for key in self.label_name:
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
def par_assign_anchor_wrapper(cfg, iroidb, feat_sym, feat_strides, anchor_scales, anchor_ratios, allowed_border):
    data, rpn_label = get_rpn_batch(iroidb, cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']
    data['gt_boxes'] = rpn_label['gt_boxes'][np.newaxis, :, :]
    feat_shape = [y[1] for y in [x.infer_shape(**data_shape) for x in feat_sym]]
    label = assign_pyramid_anchor(feat_shape, rpn_label['gt_boxes'], data['im_info'], cfg,
                                  feat_strides, anchor_scales, anchor_ratios, allowed_border)
    return {'data': data, 'label': label}
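# The list comprehension above exploits the fact that Symbol.infer_shape returns
# (arg_shapes, out_shapes, aux_shapes), so y[1] is the list of output shapes for one
# pyramid symbol. A toy, self-contained illustration of that pattern; the helper name
# and the two pooling symbols are stand-ins, not the network used in this repository:
import mxnet as mx

def _infer_shape_demo():
    toy_data = mx.sym.Variable('data')
    toy_feat_sym = [mx.sym.Pooling(toy_data, kernel=(2, 2), stride=(s, s), pool_type='max')
                    for s in (2, 4)]
    toy_shape = {'data': (1, 3, 32, 32)}
    # each infer_shape call yields (arg_shapes, out_shapes, aux_shapes)
    feat_shape = [y[1] for y in [x.infer_shape(**toy_shape) for x in toy_feat_sym]]
    return feat_shape  # [[(1, 3, 16, 16)], [(1, 3, 8, 8)]]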
def get_batch_parallel(self):
    # slice roidb
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

    # decide multi device slice
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    slices = _split_input_slice(self.batch_size, work_load_list)

    # load each slice and track the elementwise maximum shape of every blob
    max_data = {}
    data_lst = []
    rpn_label_lst = []
    for idx, islice in enumerate(slices):
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, rpn_label = get_rpn_batch(iroidb, self.cfg)
        data['gt_boxes'] = rpn_label['gt_boxes'][np.newaxis, :, :]
        data_shape = {k: list(v.shape) for k, v in data.items()}
        if max_data == {}:
            max_data = data_shape
        else:
            for k in data_shape:
                max_data[k] = np.where(np.array(max_data[k]) > np.array(data_shape[k]),
                                       np.array(max_data[k]), np.array(data_shape[k]))
        data_lst.append(data)
        rpn_label_lst.append(rpn_label)

    # allocate batch blobs at the maximum shape; gt_boxes is padded with -1
    for k in max_data:
        max_data[k][0] = self.batch_size
    self.data = [mx.nd.zeros(tuple(max_data['data'])),
                 mx.nd.zeros(tuple(max_data['im_info'])),
                 mx.nd.full(tuple(max_data['gt_boxes']), -1)]
    del max_data['im_info']
    del max_data['gt_boxes']
    max_data = {k: tuple(v) for k, v in max_data.items()}

    all_label = {}
    for idx, islice in enumerate(slices):
        # infer the output shape of every pyramid level from the padded data shape
        feat_shape = [y[1] for y in [x.infer_shape(**max_data) for x in self.feat_sym]]
        d = data_lst[idx]
        # copy each slice into its region of the padded batch blobs
        self.data[0][idx, :d['data'].shape[1], :d['data'].shape[2], :d['data'].shape[3]] = d['data'][0]
        self.data[1][idx, :d['im_info'].shape[1]] = d['im_info'][0]
        self.data[2][idx, :d['gt_boxes'].shape[1], :d['gt_boxes'].shape[2]] = d['gt_boxes'][0]
        label = assign_pyramid_anchor(feat_shape, rpn_label_lst[idx]['gt_boxes'],
                                      data_lst[idx]['im_info'], self.cfg,
                                      self.feat_strides, self.anchor_scales,
                                      self.anchor_ratios, self.allowed_border)
        if all_label == {}:
            all_label = label
        else:
            for k, v in label.items():
                all_label[k] = np.vstack([all_label[k], v])
    self.label = [mx.nd.array(v) for k, v in all_label.items()]
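# A tiny, self-contained illustration of the padding scheme above, with made-up
# shapes (the helper name is an assumption, not part of the repository): take the
# elementwise maximum of the per-image shapes, allocate the batch blob at that
# size, then copy each image into its slot.
import numpy as np

def _pad_batch_demo():
    shapes = [(1, 3, 600, 800), (1, 3, 750, 600)]
    max_shape = np.max(np.array(shapes), axis=0)   # -> [1, 3, 750, 800]
    max_shape[0] = len(shapes)                     # batch dimension
    batch_blob = np.zeros(tuple(max_shape), dtype=np.float32)
    for i, shape in enumerate(shapes):
        img = np.random.rand(*shape).astype(np.float32)
        batch_blob[i, :, :shape[2], :shape[3]] = img[0]
    return batch_blob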
def par_assign_anchor_wrapper(cfg, iroidb, feat_sym, feat_strides, anchor_scales, anchor_ratios, allowed_border):
    # get training data for multi-GPU
    data, rpn_label = get_rpn_batch(iroidb, cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']

    # add gt_boxes to data for e2e
    data['gt_boxes'] = rpn_label['gt_boxes'][np.newaxis, :, :]

    # infer the output shape of every pyramid level, then assign anchors
    feat_shape = [y[1] for y in [x.infer_shape(**data_shape) for x in feat_sym]]
    label = assign_pyramid_anchor(feat_shape, rpn_label['gt_boxes'], data['im_info'], cfg,
                                  feat_strides, anchor_scales, anchor_ratios, allowed_border)
    return {'data': data, 'label': label}
def parfetch(self, iroidb):
    # get training data for multi-GPU
    data, label = get_rpn_batch(iroidb, self.cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # assign anchor for label
    label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                          self.feat_stride, self.anchor_scales,
                          self.anchor_ratios, self.allowed_border)
    return {'data': data, 'label': label}
def parfetch(self, iroidb):
    # get training data for multi-GPU
    data, label = get_rpn_batch(iroidb, self.cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # assign anchor for label
    label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'], self.cfg,
                          self.feat_stride, self.anchor_scales, self.anchor_ratios,
                          self.allowed_border, self.normalize_target,
                          self.bbox_mean, self.bbox_std)
    return {'data': data, 'label': label}
def parfetch(self, iroidb):
    # get training data for multi-GPU
    data, label = get_rpn_batch(iroidb, self.cfg)
    data_shape = {k: v.shape for k, v in data.items()}
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]

    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

    # assign anchor for label
    label = assign_anchor(feat_shape=feat_shape, gt_boxes=label['gt_boxes'],
                          im_info=data['im_info'], cfg=self.cfg,
                          class_num=self.class_num, feat_stride=self.feat_stride,
                          scales=self.anchor_scales, ratios=self.anchor_ratios,
                          allowed_border=self.allowed_border, multi_label=self.multi_label)
    return {'data': data, 'label': label}