import math
import random

import numpy as np
from PIL import Image

# NOTE: project-level helpers (get_segmentation_train_batch, get_segmentation_image_voc,
# generate_metric_label, border_ignore_label, make_divisible, tensor_vstack) are assumed
# to be imported from elsewhere in this repository.


def parfetch(config, crop_width, crop_height, isegdb):
    # get training data for multi-GPU
    data, label = get_segmentation_train_batch(isegdb, config)
    if config.TRAIN.ENABLE_CROP:
        data_internal = data['data']
        label_internal = label['label']

        # sample the top-left corner of a random crop_height x crop_width window
        sx = int(math.floor(random.random() * (data_internal.shape[3] - crop_width + 1)))
        sy = int(math.floor(random.random() * (data_internal.shape[2] - crop_height + 1)))
        assert 0 <= sx < data_internal.shape[3] - crop_width + 1
        assert 0 <= sy < data_internal.shape[2] - crop_height + 1

        # inclusive bottom-right corner of the crop window
        ex = int(sx + crop_width - 1)
        ey = int(sy + crop_height - 1)

        data_internal = data_internal[:, :, sy:ey + 1, sx:ex + 1]
        label_internal = label_internal[:, :, sy:ey + 1, sx:ex + 1]

        data['data'] = data_internal
        label['label'] = label_internal

        assert (data['data'].shape[2] == crop_height) and (data['data'].shape[3] == crop_width)
        assert (label['label'].shape[2] == crop_height) and (label['label'].shape[3] == crop_width)

    return {'data': data, 'label': label}
def parfetch(config, crop_width, crop_height, isegdb):
    # get training data for multi-GPU
    if config.dataset.dataset == "PascalVOC" or config.dataset.dataset == "ADE20K":
        datas = {}
        labels = {}
        datas['data'], labels['label'] = get_segmentation_image_voc(isegdb, config)
        if config.network.use_metric:
            labels['metric_label'] = generate_metric_label(labels['label'])
            if config.TRAIN.use_mult_metric:
                for i in [1, 2, 4]:
                    labels['metric_label_' + str(i)] = generate_metric_label(labels['label'], skip_step=i)
        return {'data': datas, 'label': labels}
    else:
        datas, labels = get_segmentation_train_batch(isegdb, config)
        feature_stride = config.network.LABEL_STRIDE
        network_ratio = config.network.ratio
        if config.TRAIN.enable_crop:
            datas_internal = datas['data']
            labels_internal = labels['label']

            # sample the top-left corner of a random crop_height x crop_width window
            sx = int(math.floor(random.random() * (datas_internal.shape[3] - crop_width + 1)))
            sy = int(math.floor(random.random() * (datas_internal.shape[2] - crop_height + 1)))
            assert 0 <= sx < datas_internal.shape[3] - crop_width + 1
            assert 0 <= sy < datas_internal.shape[2] - crop_height + 1

            # inclusive bottom-right corner of the crop window
            ex = int(sx + crop_width - 1)
            ey = int(sy + crop_height - 1)

            datas_internal = datas_internal[:, :, sy:ey + 1, sx:ex + 1]
            labels_internal = labels_internal[:, :, sy:ey + 1, sx:ex + 1]

            if config.network.use_crop_context:
                # keep a larger context window around the crop: pad the original
                # image/label and cut out a scale_height x scale_width region
                crop_context_scale = config.network.crop_context_scale
                scale_width = make_divisible(int(float(crop_width) / crop_context_scale), feature_stride)
                scale_height = make_divisible(int(float(crop_height) / crop_context_scale), feature_stride)
                pad_width = int(scale_width - crop_width) / 2
                pad_height = int(scale_height - crop_height) / 2

                datas['origin_data'] = np.zeros((datas['data'].shape[0], datas['data'].shape[1],
                                                 datas['data'].shape[2] + 2 * int(pad_height),
                                                 datas['data'].shape[3] + 2 * int(pad_width)))
                datas['origin_data'][:, :,
                                     int(pad_height):datas['data'].shape[2] + int(pad_height),
                                     int(pad_width):datas['data'].shape[3] + int(pad_width)] = datas['data']
                labels['origin_label'] = np.full((labels['label'].shape[0], labels['label'].shape[1],
                                                  labels['label'].shape[2] + 2 * int(pad_height),
                                                  labels['label'].shape[3] + 2 * int(pad_width)), 255)
                labels['origin_label'][:, :,
                                       int(pad_height):labels['label'].shape[2] + int(pad_height),
                                       int(pad_width):labels['label'].shape[3] + int(pad_width)] = labels['label']

                datas_origin = datas['origin_data'][:, :, sy:sy + scale_height, sx:sx + scale_width]
                labels_origin = labels['origin_label'][:, :, sy:sy + scale_height, sx:sx + scale_width]
                datas['origin_data'] = datas_origin
                labels['origin_label'] = labels_origin

                # labels_origin_in = np.zeros((labels['origin_label'].shape[0], labels['origin_label'].shape[1],
                #                              labels['origin_label'].shape[2] // feature_stride,
                #                              labels['origin_label'].shape[3] // feature_stride))
                # for i, label in enumerate(labels['origin_label']):
                #     label_im = Image.fromarray(np.squeeze(label.astype(np.uint8, copy=False))).resize(
                #         (labels['origin_label'].shape[3] // feature_stride,
                #          labels['origin_label'].shape[2] // feature_stride), Image.NEAREST)
                #     label = np.array(label_im)
                #     labels_origin_in[i, 0, :, :] = label
                # labels['origin_label'] = labels_origin_in

                # one ROI per image: the crop location inside the padded context window
                rois = []
                for i, im_info in zip(xrange(datas_internal.shape[0]), datas['im_info']):
                    rois.append(np.array([i, pad_width, pad_height,
                                          pad_width + crop_width,
                                          pad_height + crop_height]).reshape((1, 5)))
                datas['rois'] = tensor_vstack(rois)
                # print rois
                datas['data'] = datas_internal
                labels['label'] = labels_internal
            else:
                # one ROI per image: the crop location in network coordinates
                rois = []
                for i, im_info in zip(xrange(datas_internal.shape[0]), datas['im_info']):
                    scale = im_info[2]
                    rois.append(np.array([i,
                                          sx * network_ratio / scale,
                                          sy * network_ratio / scale,
                                          (ex + 1) * network_ratio / scale,
                                          (ey + 1) * network_ratio / scale]).reshape((1, 5)))
                datas['rois'] = tensor_vstack(rois)
                datas['data'] = datas_internal
                labels['label'] = labels_internal

            assert (datas['data'].shape[2] == crop_height) and (datas['data'].shape[3] == crop_width)
        else:
            # no cropping: the ROI covers the whole (rescaled) image
            datas_internal = datas['data']
            rois = []
            for i, im_info in zip(xrange(datas_internal.shape[0]), datas['im_info']):
                im_size = im_info[:2]
                rois.append(np.array([i, 0, 0,
                                      im_size[1] * network_ratio,
                                      im_size[0] * network_ratio]).reshape((1, 5)))
            datas['rois'] = tensor_vstack(rois)

        # if feature_stride == 1:
        #     assert (labels['label'].shape[2] == crop_height) and (labels['label'].shape[3] == crop_width)
        # else:
        labels_in = dict()
        labels_in['origin_label'] = labels['origin_label']
        labels_in['label'] = np.zeros((labels['label'].shape[0], labels['label'].shape[1],
                                       labels['label'].shape[2] // feature_stride,
                                       labels['label'].shape[3] // feature_stride))
        # downsample the label map to the network's output stride
        for i, label in enumerate(labels['label']):
            label_im = Image.fromarray(np.squeeze(label.astype(np.uint8, copy=False))).resize(
                (labels['label'].shape[3] // feature_stride,
                 labels['label'].shape[2] // feature_stride), Image.NEAREST)
            label = np.array(label_im)
            labels_in['label'][i, 0, :, :] = label
        labels = labels_in

        if config.TRAIN.enable_ignore_border:
            labels['label'] = border_ignore_label(labels['label'], config.TRAIN.ignore_border_size, 255.0)

        if config.network.use_metric:
            labels['metric_label'] = generate_metric_label(labels['label'])
            if config.TRAIN.use_mult_metric:
                scale_name = ['a', 'b', 'c']
                if config.network.scale_list == [1, 2, 4]:
                    scale_name = ['', '', '']
                for idx, i in enumerate(config.network.scale_list):
                    labels['metric_label_' + str(i) + scale_name[idx]] = generate_metric_label(labels['label'], skip_step=i)

        return {'data': datas, 'label': labels}
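

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The real iterator that drives
# parfetch lives elsewhere in this repository; the crop sizes, GPU count and
# per-GPU segdb split below are assumptions, not the project's actual API.
# ---------------------------------------------------------------------------
def _demo_parfetch(config, segdb, crop_width=768, crop_height=768, num_gpus=2):
    # Split the sampled segdb entries evenly across GPUs and fetch one
    # {'data': ..., 'label': ...} blob per slice, as a data iterator might.
    per_gpu = len(segdb) // num_gpus
    blobs = [parfetch(config, crop_width, crop_height,
                      segdb[i * per_gpu:(i + 1) * per_gpu])
             for i in xrange(num_gpus)]
    for blob in blobs:
        # each blob holds NCHW image data and the stride-downsampled label map
        print(blob['data']['data'].shape, blob['label']['label'].shape)
    return blobs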