def get_batch(self):
    """Load the next end-to-end RPN training batch (with keypoints).

    Slices the current window of the roidb, splits it across devices,
    pads the image tensors to a common size, assigns anchor targets per
    device, and stores the results as lists of NDArrays on ``self.data``
    and ``self.label`` (ordered by ``self.data_name`` / ``self.label_name``).
    """
    # slice roidb: take the next batch_size entries (clamped at dataset end)
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice; default to equal work per context
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # fetch one (data, label) dict pair per device slice
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb)
        data_list.append(data)
        label_list.append(label)
    # pad data first and then assign anchor (anchor assignment needs the
    # padded image shape to infer the feature-map size)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]
    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer feature-map shape from the padded data shapes;
        # im_info is not a network input for shape inference, so drop it
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
        feat_shape = [int(i) for i in feat_shape[0]]
        # add gt_boxes / gt_keypoints to data for e2e training
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
        data['gt_keypoints'] = label['gt_keypoints'][np.newaxis, :, :]
        # rebind label: replace gt boxes with per-anchor RPN targets
        label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                              self.feat_stride, self.anchor_scales,
                              self.anchor_ratios, self.allowed_border)
        new_label_list.append(label)
    # stack per-device dicts into whole-batch arrays
    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])
    all_label = dict()
    for key in self.label_name:
        # 'label' is padded with -1 (ignore index); targets/weights with 0
        pad = -1 if key == 'label' else 0
        all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)
    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
def parfetch(self, roidb):
    """Build one training sample (data + anchor labels) for a single device.

    :param roidb: list with exactly one roidb entry (asserted below).
    :return: dict {'data': data dict, 'label': anchor-target dict}.
    """
    # get data for multi-gpu
    data, label = get_rpn_batch(roidb)
    data_shape = {k: v.shape for k, v in data.items()}
    # not use im info for RPN shape inference
    del data_shape['im_info']
    _, feat_shape, _ = self.feat_sym.infer_shape(**data_shape)
    feat_shape = [int(i) for i in feat_shape[0]]
    # add gt_boxes to data for e2e
    data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
    # add gt_masks to data for e2e; mask loading only supports one image
    assert len(roidb) == 1
    # im_info[0, :2] is presumably (height, width) of the image — TODO confirm
    gt_masks = get_gt_masks(roidb[0]['cache_seg_inst'],
                            data['im_info'][0, :2].astype('int'))
    data['gt_masks'] = gt_masks
    # replace gt boxes with per-anchor RPN classification/regression targets
    label = assign_anchor(feat_shape, label['gt_boxes'], data['im_info'],
                          self.feat_stride, self.anchor_scales,
                          self.anchor_ratios, self.allowed_border)
    return {'data': data, 'label': label}
def get_batch(self):
    """Load the next end-to-end RPN training batch.

    Takes the current roidb window, splits it over devices, pads image
    tensors to a common size, converts gt boxes into anchor targets, and
    publishes the batch on ``self.data`` / ``self.label``.
    """
    # current window of roidb entries
    start = self.cur
    stop = min(start + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(start, stop)]
    # per-device work split (uniform when unspecified)
    work_load_list = self.work_load_list
    if work_load_list is None:
        work_load_list = [1] * len(self.ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(self.ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # one (data, label) dict pair per device slice
    data_list = []
    label_list = []
    for sl in slices:
        d, l = get_rpn_batch(roidb[sl.start:sl.stop])
        data_list.append(d)
        label_list.append(l)
    # pad all images to a common size, then hand each device its slice back
    padded_images = tensor_vstack([d['data'] for d in data_list])
    for d, image in zip(data_list, padded_images):
        d['data'] = image[np.newaxis, :]
    new_label_list = []
    for d, l in zip(data_list, label_list):
        # infer the feature-map shape from the padded inputs
        # (im_info is not a network input for shape inference)
        shape_map = {k: v.shape for k, v in d.items()}
        del shape_map['im_info']
        _, out_shapes, _ = self.feat_sym.infer_shape(**shape_map)
        feat_shape = [int(s) for s in out_shapes[0]]
        # expose gt boxes through the data dict for end-to-end training
        d['gt_boxes'] = l['gt_boxes'][np.newaxis, :, :]
        # convert gt boxes into per-anchor RPN targets
        new_label_list.append(
            assign_anchor(feat_shape, l['gt_boxes'], d['im_info'],
                          self.feat_stride, self.anchor_scales,
                          self.anchor_ratios, self.allowed_border))
    # concatenate per-device dicts into the whole batch
    all_data = {name: tensor_vstack([d[name] for d in data_list])
                for name in self.data_name}
    all_label = {}
    for name in self.label_name:
        # classification labels pad with ignore-index -1, others with 0
        fill = -1 if name == 'label' else 0
        all_label[name] = tensor_vstack([l[name] for l in new_label_list], pad=fill)
    self.data = [mx.nd.array(all_data[name]) for name in self.data_name]
    self.label = [mx.nd.array(all_label[name]) for name in self.label_name]
def get_batch(self):
    """Load the next test batch.

    Populates ``self.im_info`` and ``self.data`` from the current roidb
    window; no label NDArrays are produced here.
    """
    start = self.cur
    stop = min(start + self.batch_size, self.size)
    batch_roidb = [self.roidb[self.index[idx]] for idx in range(start, stop)]
    # pick the loader matching the pipeline: RPN (with or without labels)
    # or plain RCNN test batches
    if not self.has_rpn:
        data, label = get_rcnn_testbatch(batch_roidb)
    elif self.withlabel:
        data, label = get_rpn_batch(batch_roidb)
    else:
        data, label = get_rpn_testbatch(batch_roidb)
    self.im_info = data['im_info']
    self.data = [mx.nd.array(data[name]) for name in self.data_name]
def get_batch(self):
    """Load the next FPN training batch.

    Splits the roidb window across devices, pads images, assigns anchor
    targets per image over all FPN levels, and publishes the batch on
    ``self.data`` / ``self.label``.
    """
    # slice roidb: next batch_size entries (clamped at dataset end)
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice; default to equal work per context
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # fetch one (data, label) dict pair per device slice
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb)
        data_list.append(data)
        label_list.append(label)
    # pad data first and then assign anchor (read label);
    # each card receives its contiguous BATCH_IMAGES slice of the padded tensor
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for i_card in range(len(data_list)):
        data_list[i_card]['data'] = data_tensor[
            i_card * config.TRAIN.BATCH_IMAGES:(1 + i_card) * config.TRAIN.BATCH_IMAGES]
    for data, label in zip(data_list, label_list):
        # infer the feature-map shape of every FPN level from the padded data
        # (im_info is not a network input for shape inference)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        feat_shape_list = []
        for s in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        # per-image slots, filled by assign_anchor_fpn below
        label['label'] = [0 for i in range(config.TRAIN.BATCH_IMAGES)]
        label['bbox_target'] = [0 for i in range(config.TRAIN.BATCH_IMAGES)]
        label['bbox_weight'] = [0 for i in range(config.TRAIN.BATCH_IMAGES)]
        for im_i in range(config.TRAIN.BATCH_IMAGES):
            im_info = data['im_info'][im_i]
            gt_boxes = label['gt_boxes'][im_i][0]
            label_dict = \
                assign_anchor_fpn(feat_shape_list, gt_boxes, im_info,
                                  self.feat_stride, self.anchor_scales,
                                  self.anchor_ratios, self.allowed_border)
            label['label'][im_i] = label_dict['label']
            label['bbox_target'][im_i] = label_dict['bbox_target']
            label['bbox_weight'][im_i] = label_dict['bbox_weight']
        # stack per-image targets into one array per device
        label['label'] = np.vstack(label['label'])
        label['bbox_target'] = np.vstack(label['bbox_target'])
        label['bbox_weight'] = np.vstack(label['bbox_weight'])
    # concatenate per-device dicts into the whole batch
    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])
    all_label = dict()
    for key in self.label_name:
        # NOTE(review): 'weight' looks unlikely to match names such as
        # 'bbox_weight', so every key would pad with -1 — TODO confirm
        # against self.label_name
        pad = 0 if key == 'weight' else -1
        all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)
    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
def get_batch(self):
    """Load the next FPN training batch, with optional debug visualization.

    Splits the roidb window across devices, pads images, assigns FPN anchor
    targets, and publishes the batch on ``self.data`` / ``self.label``.
    While ``0 <= self.visid < 10``, also writes an image per batch to
    ``./vis/`` with foreground anchors (blue) and gt boxes (red) drawn.

    Fix: ``np.int`` was removed in NumPy 1.24 — the two ``.astype(np.int)``
    calls would raise ``AttributeError``; replaced with the builtin ``int``.
    """
    # slice roidb: next batch_size entries (clamped at dataset end)
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice; default to equal work per context
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # fetch one (data, label) dict pair per device slice
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb)
        data_list.append(data)
        label_list.append(label)
    # pad data first and then assign anchor (read label);
    # each card receives its contiguous BATCH_IMAGES slice of the padded tensor
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for i_card in range(len(data_list)):
        data_list[i_card]['data'] = data_tensor[
            i_card * config.TRAIN.BATCH_IMAGES:(1 + i_card) * config.TRAIN.BATCH_IMAGES]
    for data, label in zip(data_list, label_list):
        # infer the feature-map shape of every FPN level from the padded data
        # (im_info is not a network input for shape inference)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        feat_shape_list = []
        for s in range(len(self.feat_stride)):
            _, feat_shape, _ = self.feat_sym[s].infer_shape(**data_shape)
            feat_shape = [int(i) for i in feat_shape[0]]
            feat_shape_list.append(feat_shape)
        # per-image slots for every label output, filled below
        for k in self.label_name:
            label[k] = [0 for i in range(config.TRAIN.BATCH_IMAGES)]
        for im_i in range(config.TRAIN.BATCH_IMAGES):
            # NOTE(review): im_info / gt_boxes are taken whole, not indexed
            # by im_i — presumably this loader runs with BATCH_IMAGES == 1;
            # TODO confirm
            im_info = data['im_info']
            gt_boxes = label['gt_boxes']
            vis = self.visid >= 0
            label_dict = assign_anchor_fpn(feat_shape_list, gt_boxes, im_info, vis)
            # debug visualization for the first 10 batches after enabling
            if self.visid >= 0 and self.visid < 10:
                for _roi in roidb:
                    print('image', _roi['image'], file=sys.stderr)
                self.visid += 1
                anchors = label_dict['anchors'].copy()
                _im = data['data'].copy()
                _label = label_dict['olabel'].copy()
                # builtin int: np.int alias removed in NumPy >= 1.24
                _gt_boxes = gt_boxes.copy().astype(int)
                filename = './vis/A%d.png' % self.visid
                _im = _im[0].transpose((1, 2, 0))
                _im = _im[..., ::-1]  # bgr
                # NOTE(review): adds per-channel means after the channel flip,
                # so PIXEL_MEANS is presumably stored in BGR order — confirm
                for c in range(3):
                    _im[:, :, c] += config.PIXEL_MEANS[c]
                _im = _im.astype(np.uint8).copy()
                fg_inds = np.where(_label == 1)[0]
                print(_im.shape, _label.shape, anchors.shape, len(fg_inds),
                      _gt_boxes.shape, file=sys.stderr)
                # draw FG anchors in blue
                _bc = 0
                for a in range(anchors.shape[0]):
                    anchor = anchors[a].astype(int)
                    l = _label[a]
                    if l != 1:
                        continue
                    cv2.rectangle(_im, (anchor[0], anchor[1]),
                                  (anchor[2], anchor[3]), (255, 0, 0), 1)
                    _bc += 1
                # draw gt boxes in red
                for a in range(_gt_boxes.shape[0]):
                    _box = _gt_boxes[a]
                    cv2.rectangle(_im, (_box[0], _box[1]),
                                  (_box[2], _box[3]), (0, 0, 255), 1)
                print('draw to', filename, _bc, file=sys.stderr)
                cv2.imwrite(filename, _im)
            for k in self.label_name:
                label[k][im_i] = label_dict[k]
        # stack per-image targets into one array per device
        for k in self.label_name:
            label[k] = np.vstack(label[k])
    # concatenate per-device dicts into the whole batch
    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])
    all_label = dict()
    for key in self.label_name:
        # bbox targets/weights pad with 0; classification labels with -1
        pad = 0 if key.startswith('bbox_') else -1
        all_label[key] = tensor_vstack([batch[key] for batch in label_list], pad=pad)
    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]
def get_batch(self):
    """Load the next FPN training batch with per-level (p2..p5) labels.

    Splits the roidb window across devices, pads images, assigns anchor
    targets via ``self._assign_anchor``, and publishes per-level label
    arrays (labelp2..p5, bbox_targetp2..p5, bbox_weightp2..p5) on
    ``self.label`` plus the stacked inputs on ``self.data``.
    """
    # slice roidb: next batch_size entries (clamped at dataset end)
    cur_from = self.cur
    cur_to = min(cur_from + self.batch_size, self.size)
    roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
    # decide multi device slice; default to equal work per context
    work_load_list = self.work_load_list
    ctx = self.ctx
    if work_load_list is None:
        work_load_list = [1] * len(ctx)
    assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
        "Invalid settings for work load. "
    slices = _split_input_slice(self.batch_size, work_load_list)
    # fetch one (data, label) dict pair per device slice,
    # optionally with data augmentation applied
    data_list = []
    label_list = []
    for islice in slices:
        iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
        data, label = get_rpn_batch(iroidb, self.use_data_augmentation)
        data_list.append(data)
        label_list.append(label)
    # pad data first and then assign anchor (read label)
    data_tensor = tensor_vstack([batch['data'] for batch in data_list])
    for data, data_pad in zip(data_list, data_tensor):
        data['data'] = data_pad[np.newaxis, :]
    new_label_list = []
    for data, label in zip(data_list, label_list):
        # infer label shape from the padded data shapes
        # (im_info is not a network input for shape inference)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        # assign anchor for label; result is presumably a list with one
        # target dict per FPN level (p2..p5) — see indexing below
        new_label_list.append(self._assign_anchor(data_shape, label['gt_boxes'], data['im_info']))
        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]
    # concatenate per-device data dicts into the whole batch
    all_data = dict()
    for key in self.data_name:
        all_data[key] = tensor_vstack([batch[key] for batch in data_list])
    # NOTE(review): only new_label_list[0] is used, i.e. labels from the
    # first device slice only — presumably this loader assumes a single
    # device; TODO confirm before multi-GPU use
    all_label = dict()
    all_label['labelp2'] = new_label_list[0][0]['label']
    all_label['labelp3'] = new_label_list[0][1]['label']
    all_label['labelp4'] = new_label_list[0][2]['label']
    all_label['labelp5'] = new_label_list[0][3]['label']
    all_label['bbox_targetp2'] = new_label_list[0][0]['bbox_target']
    all_label['bbox_targetp3'] = new_label_list[0][1]['bbox_target']
    all_label['bbox_targetp4'] = new_label_list[0][2]['bbox_target']
    all_label['bbox_targetp5'] = new_label_list[0][3]['bbox_target']
    all_label['bbox_weightp2'] = new_label_list[0][0]['bbox_weight']
    all_label['bbox_weightp3'] = new_label_list[0][1]['bbox_weight']
    all_label['bbox_weightp4'] = new_label_list[0][2]['bbox_weight']
    all_label['bbox_weightp5'] = new_label_list[0][3]['bbox_weight']
    self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
    self.label = [mx.nd.array(all_label[key]) for key in self.label_name]