class CapsulePoseTest(object):
    """Offline tester for the multi-person capsule pose detector.

    Runs the network on single images (or a directory of images), decodes the
    part-affinity-field (PAF) / heatmap outputs into per-person keypoint sets,
    and writes raw/visualized images plus a JSON result per input image.
    """

    def __init__(self, configer):
        self.configer = configer
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        # Fall back to CPU when no GPU is configured.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

    def init_model(self):
        """Build the detector, restore weights, and switch to eval mode."""
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Run pose inference on one image and write raw/vis/JSON outputs.

        Keypoints are decoded at the network input resolution and then scaled
        back to the original image size before drawing and serialization.
        """
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        cur_img_rgb = ImageHelper.resize(ori_img_rgb, self.configer.get(
            'data', 'input_size'), interpolation=Image.CUBIC)
        # NOTE(review): despite the helper's name, this swaps RGB -> BGR for
        # OpenCV drawing/imwrite (the channel swap is symmetric) — confirm.
        ori_img_bgr = ImageHelper.bgr2rgb(ori_img_rgb)
        paf_avg, heatmap_avg, partmap_avg = self.__get_paf_and_heatmap(
            cur_img_rgb)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(
            cur_img_rgb, paf_avg, partmap_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k,
                                               all_peaks)
        json_dict = self.__get_info_tree(cur_img_rgb, subset, candidate)
        # Rescale visible keypoints (confidence != -1) back to original size.
        for i in range(len(json_dict['objects'])):
            for index in range(len(json_dict['objects'][i]['keypoints'])):
                if json_dict['objects'][i]['keypoints'][index][2] == -1:
                    continue

                json_dict['objects'][i]['keypoints'][index][0] *= (
                    ori_img_rgb.shape[1] / cur_img_rgb.shape[1])
                json_dict['objects'][i]['keypoints'][index][1] *= (
                    ori_img_rgb.shape[0] / cur_img_rgb.shape[0])

        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(),
                                                    json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)
        Log.info('Json Save Path: {}'.format(json_path))
        with open(json_path, 'w') as save_stream:
            save_stream.write(json.dumps(json_dict))

    def __get_info_tree(self, image_raw, subset, candidate):
        """Convert the assembled subsets into a JSON-serializable dict.

        Persons with at most one detected part are dropped; missing keypoints
        are encoded as (-1, -1, -1), present ones as (x, y, 1).
        """
        json_dict = dict()
        height, width, _ = image_raw.shape
        json_dict['image_height'] = height
        json_dict['image_width'] = width
        object_list = list()
        for n in range(len(subset)):
            # subset[n][-1] is the part count of this person; skip tiny ones.
            if subset[n][-1] <= 1:
                continue

            object_dict = dict()
            object_dict['keypoints'] = np.zeros(
                (self.configer.get('data', 'num_keypoints'), 3)).tolist()
            for j in range(self.configer.get('data', 'num_keypoints')):
                index = subset[n][j]
                if index == -1:
                    object_dict['keypoints'][j][0] = -1
                    object_dict['keypoints'][j][1] = -1
                    object_dict['keypoints'][j][2] = -1
                else:
                    object_dict['keypoints'][j][0] = candidate[index.astype(
                        int)][0]
                    object_dict['keypoints'][j][1] = candidate[index.astype(
                        int)][1]
                    object_dict['keypoints'][j][2] = 1

            # subset[n][-2] is this person's accumulated connection score.
            object_dict['score'] = subset[n][-2]
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def __get_paf_and_heatmap(self, img_raw):
        """Run multi-scale inference and average PAF/heatmap/partmap outputs.

        For each search scale the image is resized, padded to a multiple of
        the network stride, normalized and forwarded; per-keypoint heatmaps
        are derived from the 4-channel-per-limb partmap magnitudes, then all
        maps are upsampled back to img_raw resolution and averaged.
        """
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] /
            img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('data', 'num_keypoints')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                            self.configer.get('network', 'paf_out')))
        partmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            img_test_pad, pad = PadImage(self.configer.get(
                'network', 'stride'))(img_test)
            pad_right = pad[2]
            pad_down = pad[3]
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                paf_out_list, partmap_out_list = self.pose_net(img_test_pad)

            # Only the last (most refined) stage output is decoded.
            paf_out = paf_out_list[-1]
            partmap_out = partmap_out_list[-1]
            partmap = partmap_out.data.squeeze().cpu().numpy().transpose(
                1, 2, 0)
            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            # self.pose_visualizer.vis_tensor(heatmap_out)
            heatmap = np.zeros((partmap.shape[0], partmap.shape[1],
                                self.configer.get('data', 'num_keypoints')))
            part_num = np.zeros((self.configer.get('data',
                                                   'num_keypoints'), ))
            # Each limb contributes 4 partmap channels: a 2-vector for each
            # endpoint. A keypoint's heatmap is the running mean of the
            # vector magnitudes over every limb it participates in.
            for index in range(len(self.configer.get('details',
                                                     'limb_seq'))):
                a = self.configer.get('details', 'limb_seq')[index][0] - 1
                b = self.configer.get('details', 'limb_seq')[index][1] - 1
                heatmap_a = partmap[:, :, index * 4:index * 4 + 2]**2
                heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                heatmap[:, :, a] = (heatmap[:, :, a] * part_num[a] +
                                    heatmap_a) / (part_num[a] + 1)
                part_num[a] += 1
                heatmap_b = partmap[:, :, index * 4 + 2:index * 4 + 4]**2
                heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                heatmap[:, :, b] = (heatmap[:, :, b] * part_num[b] +
                                    heatmap_b) / (part_num[b] + 1)
                part_num[b] += 1

            # Upsample by the network stride, drop the padding, then resize
            # to the raw image resolution (same pipeline for all 3 maps).
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) -
                              pad_right, :]
            heatmap = cv2.resize(heatmap,
                                 (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)
            partmap = cv2.resize(partmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            partmap = partmap[:img_test_pad.size(2) -
                              pad_down, :img_test_pad.size(3) -
                              pad_right, :]
            partmap = cv2.resize(partmap,
                                 (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)
            paf = cv2.resize(paf, (0, 0),
                             fx=self.configer.get('network', 'stride'),
                             fy=self.configer.get('network', 'stride'),
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad_down, :img_test_pad.size(3)
                      - pad_right, :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]),
                             interpolation=cv2.INTER_CUBIC)
            # Uniform average over all search scales.
            partmap_avg = partmap_avg + partmap / len(multiplier)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return paf_avg, heatmap_avg, partmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Find local-maximum peaks per keypoint channel.

        Returns a list (one entry per keypoint type) of tuples
        (x, y, score, global_peak_id).
        """
        all_peaks = []
        peak_counter = 0
        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)
            # 4-neighborhood shifted copies for local-maximum comparison.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]
            # Peak = >= all 4 neighbours and above the part threshold.
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))
            peaks = zip(
                np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0])  # note reverse: (x, y) order
            peaks = list(peaks)
            # Score each peak with the *unsmoothed* heatmap value.
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [
                peaks_with_score[i] + (ids[i], ) for i in range(len(ids))
            ]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, partmap_avg, all_peaks):
        """Score candidate limb connections between detected peaks.

        For every limb, each (peak A, peak B) pair is scored by integrating
        the PAF along the segment plus a distance prior, and filtered by the
        partmap direction-similarity term (sim_length). Returns the limbs
        with no candidates (special_k) and per-limb accepted connections
        as rows [peak_id_A, peak_id_B, score, i, j].
        """
        connection_all = []
        special_k = []
        mid_num = self.configer.get('vis', 'mid_point_num')
        for k in range(len(self.configer.get('details', 'limb_seq'))):
            score_mid = paf_avg[:, :, [k * 2, k * 2 + 1]]
            # self.pose_visualizer.vis_paf(score_mid, img_raw, name='pa{}'.format(k))
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0]
                              - 1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1]
                              - 1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        # Partmap vectors at each endpoint; B's is negated so
                        # both should point from A towards B.
                        vec_a = partmap_avg[candA[i][1], candA[i][0],
                                            k * 4:k * 4 + 2]
                        vec_b = -partmap_avg[candB[j][1], candB[j][0],
                                             k * 4 + 2:k * 4 + 4]
                        norm_a = math.sqrt(vec_a[0] * vec_a[0] +
                                           vec_a[1] * vec_a[1]) + 1e-9
                        vec_a = np.divide(vec_a, norm_a)
                        norm_b = math.sqrt(vec_b[0] * vec_b[0] +
                                           vec_b[1] * vec_b[1]) + 1e-9
                        vec_b = np.divide(vec_b, norm_b)
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        # Mean projection of the A->B displacement onto both
                        # endpoint direction vectors.
                        sim_length = np.sum(vec_a * vec +
                                            vec_b * vec) / 2.0
                        norm = math.sqrt(vec[0] * vec[0] +
                                         vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)
                        # Sample the PAF at mid_num points along the segment.
                        startend = zip(
                            np.linspace(candA[i][0], candB[j][0],
                                        num=mid_num),
                            np.linspace(candA[i][1], candB[j][1],
                                        num=mid_num))
                        startend = list(startend)
                        vec_x = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 0]
                            for I in range(len(startend))
                        ])
                        vec_y = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 1]
                            for I in range(len(startend))
                        ])
                        score_midpts = np.multiply(
                            vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts)
                        # Penalize limbs longer than half the image height.
                        score_with_dist_prior += min(
                            0.5 * img_raw.shape[0] / norm - 1, 0)
                        num_positive = len(
                            np.nonzero(score_midpts > self.configer.get(
                                'vis', 'limb_threshold'))[0])
                        criterion1 = num_positive > int(
                            self.configer.get('vis', 'limb_pos_ratio') *
                            len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2 and sim_length > self.configer.get(
                                'vis', 'sim_length'):
                            connection_candidate.append([
                                i, j, score_with_dist_prior,
                                score_with_dist_prior + candA[i][2] +
                                candB[j][2]
                            ])

                # Greedy assignment: best-scoring pairs first, each peak
                # used at most once.
                connection_candidate = sorted(connection_candidate,
                                              key=lambda x: x[2],
                                              reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack(
                            [connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        """Assemble limb connections into per-person keypoint subsets.

        Each subset row holds one candidate peak index per keypoint (or -1),
        plus two trailing slots (see comments below). Rows sharing a peak
        are merged when disjoint.
        """
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones(
            (0, self.configer.get('data', 'num_keypoints') + 2))
        candidate = np.array(
            [item for sublist in all_peaks for item in sublist])
        for k in self.configer.get('details', 'mini_tree'):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(
                    self.configer.get('details', 'limb_seq')[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][
                                indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        # Extend the single matching person with part B.
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) +
                                      (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    # if find no partA in the subset, create a new subset
                    elif not found:
                        row = -1 * np.ones(
                            self.configer.get('data', 'num_keypoints') + 2)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(
                            candidate[connection_all[k][i, :2].astype(int),
                                      2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def test(self):
        """Entry point: run inference on `test_img` XOR `test_dir`.

        Exits with an error when neither or both are configured. Output
        files go under <project_dir>/val/results/pose/<dataset>/.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            if not os.path.exists(os.path.dirname(json_path)):
                os.makedirs(os.path.dirname(json_path))

            if not os.path.exists(os.path.dirname(raw_path)):
                os.makedirs(os.path.dirname(raw_path))

            if not os.path.exists(os.path.dirname(vis_path)):
                os.makedirs(os.path.dirname(vis_path))

            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                if not os.path.exists(os.path.dirname(json_path)):
                    os.makedirs(os.path.dirname(json_path))

                if not os.path.exists(os.path.dirname(raw_path)):
                    os.makedirs(os.path.dirname(raw_path))

                if not os.path.exists(os.path.dirname(vis_path)):
                    os.makedirs(os.path.dirname(vis_path))

                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Decode ground-truth maps from the val loader for visual checks.

        Processes at most 2 samples (then exits the process) and shows/saves
        the decoded pose drawn from ground-truth partmap/vecmap tensors.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.pose_data_loader.get_valloader()
        count = 0
        for i, (inputs, partmap, maskmap,
                vecmap) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 2:
                    exit(1)

                Log.info(partmap.size())
                # Undo normalization to recover the displayable uint8 image.
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                partmap_avg = partmap[j].numpy().transpose(1, 2, 0)
                # Same partmap -> per-keypoint heatmap reduction as in
                # __get_paf_and_heatmap, but on ground-truth tensors.
                heatmap_avg = np.zeros(
                    (partmap_avg.shape[0], partmap_avg.shape[1],
                     self.configer.get('data', 'num_keypoints')))
                part_num = np.zeros((self.configer.get('data',
                                                       'num_keypoints'), ))
                for index in range(
                        len(self.configer.get('details', 'limb_seq'))):
                    a = self.configer.get('details',
                                          'limb_seq')[index][0] - 1
                    b = self.configer.get('details',
                                          'limb_seq')[index][1] - 1
                    heatmap_a = partmap_avg[:, :,
                                            index * 4:index * 4 + 2]**2
                    heatmap_a = np.sqrt(np.sum(heatmap_a, axis=2).squeeze())
                    heatmap_avg[:, :, a] = (heatmap_avg[:, :, a] *
                                            part_num[a] +
                                            heatmap_a) / (part_num[a] + 1)
                    part_num[a] += 1
                    heatmap_b = partmap_avg[:, :, index * 4 + 2:index * 4 +
                                            4]**2
                    heatmap_b = np.sqrt(np.sum(heatmap_b, axis=2).squeeze())
                    heatmap_avg[:, :, b] = (heatmap_avg[:, :, b] *
                                            part_num[b] +
                                            heatmap_b) / (part_num[b] + 1)
                    part_num[b] += 1

                partmap_avg = cv2.resize(
                    partmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                heatmap_avg = cv2.resize(
                    heatmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0),
                                     fx=self.configer.get(
                                         'network', 'stride'),
                                     fy=self.configer.get(
                                         'network', 'stride'),
                                     interpolation=cv2.INTER_CUBIC)
                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(
                    image_bgr, paf_avg, partmap_avg, all_peaks)
                subset, candidate = self.__get_subsets(
                    connection_all, special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset,
                                                 candidate)
                image_canvas = self.pose_parser.draw_points(
                    image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(
                    image_canvas, json_dict)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()

    def __list_dir(self, dir_name):
        """List files in dir_name, descending exactly one subdirectory level.

        Files inside a subdirectory are returned as 'subdir/filename'.
        """
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class FCNSegmentor(object):
    """The class for semantic segmentation. Includes train & val phases.

    (The previous docstring said "Pose Estimation", but every collaborator —
    SegVisualizer, SegLossManager, SegModelManager, SegDataLoader — is a
    segmentation component.)
    """

    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and loss statistics.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_loss_manager = SegLossManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.seg_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        """Build the network, restore weights, and set up loaders/optimizer.

        Exits the process when the configured dataset is unsupported.
        """
        self.seg_net = self.seg_model_manager.seg_net()
        self.iters = 0
        self.seg_net, _ = self.module_utilizer.load_net(self.seg_net)
        self.optimizer, self.lr = self.module_utilizer.update_optimizer(self.seg_net, self.iters)
        if self.configer.get('dataset') == 'cityscape':
            self.train_loader = self.seg_data_loader.get_trainloader(FSCityScapeLoader)
            self.val_loader = self.seg_data_loader.get_valloader(FSCityScapeLoader)
        else:
            Log.error('Dataset: {} is not valid!'.format(self.configer.get('dataset')))
            exit(1)

        self.pixel_loss = self.seg_loss_manager.get_seg_loss('cross_entropy_loss')

    def __train(self):
        """Train function of every epoch during train phase."""
        self.seg_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)

            # Fix: `async` became a reserved keyword in Python 3.7 — the
            # modern spelling is `non_blocking`. The deprecated Variable
            # wrapper is unnecessary since the PyTorch 0.4 Tensor merge
            # (the OpenPose trainer in this file already follows this style).
            inputs = data_tuple[0].cuda(non_blocking=True)
            targets = data_tuple[1].cuda(non_blocking=True)

            # Forward pass.
            outputs = self.seg_net(inputs)

            # Compute the loss of the train batch & backward.
            loss_pixel = self.pixel_loss(outputs, targets)
            loss = loss_pixel
            # Fix: `loss.data[0]` was removed in PyTorch 0.4; use .item().
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                             self.iters,
                             self.configer.get('solver', 'display_iter'),
                             self.lr,
                             batch_time=self.batch_time,
                             data_time=self.data_time,
                             loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            self.optimizer, self.lr = self.module_utilizer.update_optimizer(self.seg_net, self.iters)

    def __val(self):
        """Validation function during the train phase."""
        self.seg_net.eval()
        start_time = time.time()

        # Fix: torch.no_grad() replaces the removed `volatile=True` flag.
        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Change the data type.
                inputs = data_tuple[0].cuda(non_blocking=True)
                targets = data_tuple[1].cuda(non_blocking=True)

                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss_pixel = self.pixel_loss(outputs, targets)
                loss = loss_pixel
                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.seg_net, self.iters)

        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(
                batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_net.train()

    def train(self):
        """Run __train repeatedly until max_iter is reached."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
class OpenPose(object):
    """
      The class for Pose Estimation. Include train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and the individual loss terms.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.train_loss_heatmap = AverageMeter()
        self.train_loss_associate = AverageMeter()
        self.val_losses = AverageMeter()
        self.val_loss_heatmap = AverageMeter()
        self.val_loss_associate = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = PoseLossManager(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        # Ground-truth heatmaps/PAFs are generated on the fly from keypoints.
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.data_transformer = DataTransformer(configer)
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self._init_model()

    def _init_model(self):
        """Build the detector, restore weights, and set up loaders/losses."""
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.pose_data_loader.get_trainloader()
        self.val_loader = self.pose_data_loader.get_valloader()
        self.weights = self.configer.get('network', 'loss_weights')
        self.mse_loss = self.pose_loss_manager.get_pose_loss('mse_loss')

    def _get_parameters(self):
        """Group parameters into four LR buckets.

        Backbone / stage-0/1 weights get base_lr, their biases 2x (no weight
        decay); the remaining (refinement-stage) weights get 4x and their
        biases 8x (no weight decay).
        """
        lr_1 = []
        lr_2 = []
        lr_4 = []
        lr_8 = []
        params_dict = dict(self.pose_net.named_parameters())
        for key, value in params_dict.items():
            if ('model1_' not in key) and ('model0.' not in key) and ('backbone.'
                                                                      not in key):
                if key[-4:] == 'bias':
                    lr_8.append(value)
                else:
                    lr_4.append(value)
            elif key[-4:] == 'bias':
                lr_2.append(value)
            else:
                lr_1.append(value)

        params = [{
            'params': lr_1,
            'lr': self.configer.get('lr', 'base_lr')
        }, {
            'params': lr_2,
            'lr': self.configer.get('lr', 'base_lr') * 2.,
            'weight_decay': 0.0
        }, {
            'params': lr_4,
            'lr': self.configer.get('lr', 'base_lr') * 4.
        }, {
            'params': lr_8,
            'lr': self.configer.get('lr', 'base_lr') * 8.,
            'weight_decay': 0.0
        }]
        return params

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['img']
            maskmap = data_dict['maskmap']
            # input_size is (width, height).
            input_size = [inputs.size(3), inputs.size(2)]
            heatmap = self.heatmap_generator(data_dict['kpts'], input_size,
                                             maskmap=maskmap)
            vecmap = self.paf_generator(data_dict['kpts'], input_size,
                                        maskmap=maskmap)
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, heatmap, maskmap, vecmap = self.module_utilizer.to_device(
                inputs, heatmap, maskmap, vecmap)
            # Forward pass.
            paf_out, heatmap_out = self.pose_net(inputs)

            # Compute the loss of the train batch & backward.
            # NOTE(review): here the full multi-stage outputs are passed with
            # mask=/weights= kwargs, whereas __val passes only the last stage
            # ([-1]) and a positional mask — confirm which signature
            # mse_loss actually expects.
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, mask=maskmap,
                                         weights=self.weights)
            loss_associate = self.mse_loss(paf_out, vecmap, mask=maskmap,
                                           weights=self.weights)
            loss = loss_heatmap + loss_associate
            self.train_losses.update(loss.item(), inputs.size(0))
            self.train_loss_heatmap.update(loss_heatmap.item(), inputs.size(0))
            self.train_loss_associate.update(loss_associate.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info('Loss Heatmap:{}, Loss Asso: {}'.format(
                    self.train_loss_heatmap.avg,
                    self.train_loss_associate.avg))
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
                self.train_loss_heatmap.reset()
                self.train_loss_associate.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for i, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                maskmap = data_dict['maskmap']
                input_size = [inputs.size(3), inputs.size(2)]
                heatmap = self.heatmap_generator(data_dict['kpts'],
                                                 input_size,
                                                 maskmap=maskmap)
                vecmap = self.paf_generator(data_dict['kpts'], input_size,
                                            maskmap=maskmap)
                # Change the data type.
                inputs, heatmap, maskmap, vecmap = self.module_utilizer.to_device(
                    inputs, heatmap, maskmap, vecmap)

                # Forward pass.
                paf_out, heatmap_out = self.pose_net(inputs)

                # Compute the loss of the val batch.
                # NOTE(review): only the last-stage output is scored here and
                # no `weights` argument is supplied, unlike __train — verify
                # the intended loss contract.
                loss_heatmap = self.mse_loss(heatmap_out[-1], heatmap,
                                             maskmap)
                loss_associate = self.mse_loss(paf_out[-1], vecmap, maskmap)
                loss = loss_heatmap + loss_associate
                self.val_losses.update(loss.item(), inputs.size(0))
                self.val_loss_heatmap.update(loss_heatmap.item(),
                                             inputs.size(0))
                self.val_loss_associate.update(loss_associate.item(),
                                               inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, save_mode='iters')
        Log.info('Loss Heatmap:{}, Loss Asso: {}'.format(
            self.val_loss_heatmap.avg, self.val_loss_associate.avg))
        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                           loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.val_loss_heatmap.reset()
        self.val_loss_associate.reset()
        self.pose_net.train()

    def train(self):
        """Run training epochs until max_epoch, optionally validating first."""
        cudnn.benchmark = True
        # Optionally validate the resumed checkpoint before training.
        if self.configer.get('network',
                             'resume') is not None and self.configer.get(
                                 'network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class FasterRCNN(object):
    """
      The class for Single Shot Detector. Include train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and loss statistics.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.det_visualizer = DetVisualizer(configer)
        self.det_loss_manager = DetLossManager(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.rpn_target_generator = RPNTargetGenerator(configer)
        self.det_running_score = DetRunningScore(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.det_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self._init_model()

    def _init_model(self):
        """Build the detector, restore weights, and set up loaders/loss."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.det_data_loader.get_trainloader()
        self.val_loader = self.det_data_loader.get_valloader()
        self.fr_loss = self.det_loss_manager.get_det_loss('fr_loss')

    def _get_parameters(self):
        """Split trainable parameters: biases get 2x LR and no weight decay."""
        lr_1 = []
        lr_2 = []
        params_dict = dict(self.det_net.named_parameters())
        for key, value in params_dict.items():
            if value.requires_grad:
                if 'bias' in key:
                    lr_2.append(value)
                else:
                    lr_1.append(value)

        params = [{
            'params': lr_1,
            'lr': self.configer.get('lr', 'base_lr')
        }, {
            'params': lr_2,
            'lr': self.configer.get('lr', 'base_lr') * 2.,
            'weight_decay': 0
        }]
        return params

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['img']
            img_scale = data_dict['imgscale']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            self.data_time.update(time.time() - start_time)
            # Change the data type: pad ragged per-image GT into dense tensors.
            gt_bboxes, gt_nums, gt_labels = self.__make_tensor(
                batch_gt_bboxes, batch_gt_labels)
            gt_bboxes, gt_num, gt_labels = self.module_utilizer.to_device(
                gt_bboxes, gt_nums, gt_labels)
            inputs = self.module_utilizer.to_device(inputs)
            # Forward pass.
            # NOTE(review): in train mode det_net returns 2 values but in
            # __val it returns 3 (plus test_group); presumably the model
            # branches on its training flag — confirm.
            feat_list, train_group = self.det_net(inputs, gt_bboxes, gt_num,
                                                  gt_labels, img_scale)
            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(
                feat_list, batch_gt_bboxes,
                [inputs.size(3), inputs.size(2)])
            gt_rpn_locs, gt_rpn_labels = self.module_utilizer.to_device(
                gt_rpn_locs, gt_rpn_labels)
            rpn_locs, rpn_scores, sample_roi_locs, sample_roi_scores, gt_roi_bboxes, gt_roi_labels = train_group

            # Compute the loss of the train batch & backward.
            loss = self.fr_loss(
                [rpn_locs, rpn_scores, sample_roi_locs, sample_roi_scores],
                [gt_rpn_locs, gt_rpn_labels, gt_roi_bboxes, gt_roi_labels])
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            # Gradient clipping guards against RPN loss spikes.
            self.module_utilizer.clip_grad(self.det_net, 10.)
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    (self.configer.get('iters')) % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                img_scale = data_dict['imgscale']
                batch_gt_bboxes = data_dict['bboxes']
                batch_gt_labels = data_dict['labels']
                # Change the data type.
                gt_bboxes, gt_nums, gt_labels = self.__make_tensor(
                    batch_gt_bboxes, batch_gt_labels)
                gt_bboxes, gt_num, gt_labels = self.module_utilizer.to_device(
                    gt_bboxes, gt_nums, gt_labels)
                inputs = self.module_utilizer.to_device(inputs)
                # Forward pass.
                # NOTE(review): this call passes `gt_nums` (the pre-to_device
                # tensor) while __train passes `gt_num` (the device copy) —
                # likely one of the two is a typo; verify against det_net.
                feat_list, train_group, test_group = self.det_net(
                    inputs, gt_bboxes, gt_nums, gt_labels, img_scale)
                rpn_locs, rpn_scores, sample_roi_locs, sample_roi_scores, gt_roi_bboxes, gt_roi_labels = train_group
                gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(
                    feat_list, batch_gt_bboxes,
                    [inputs.size(3), inputs.size(2)])
                gt_rpn_locs, gt_rpn_labels = self.module_utilizer.to_device(
                    gt_rpn_locs, gt_rpn_labels)
                # Compute the loss of the train batch & backward.
                loss = self.fr_loss(
                    [rpn_locs, rpn_scores, sample_roi_locs, sample_roi_scores],
                    [gt_rpn_locs, gt_rpn_labels, gt_roi_bboxes, gt_roi_labels])
                self.val_losses.update(loss.item(), inputs.size(0))
                # Decode detections and feed the mAP accumulator.
                test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group
                batch_detections = FastRCNNTest.decode(
                    test_roi_locs, test_roi_scores, test_indices_and_rois,
                    test_rois_num, self.configer,
                    [inputs.size(3), inputs.size(2)])
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                self.det_running_score.update(batch_pred_bboxes,
                                              batch_gt_bboxes,
                                              batch_gt_labels)

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.det_net, save_mode='iters')
        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                           loss=self.val_losses))
        Log.info('Val mAP: {}\n'.format(self.det_running_score.get_mAP()))
        self.det_running_score.reset()
        self.batch_time.reset()
        self.val_losses.reset()
        self.det_net.train()

    def __make_tensor(self, gt_bboxes, gt_labels):
        """Pad per-image ground truth into dense batch tensors.

        Returns (bboxes [B, max_n, 4], counts [B], labels [B, max_n]);
        images with no objects contribute all-zero rows.
        """
        len_arr = [gt_labels[i].numel() for i in range(len(gt_bboxes))]
        batch_maxlen = max(max(len_arr), 1)
        target_bboxes = torch.zeros((len(gt_bboxes), batch_maxlen, 4)).float()
        target_labels = torch.zeros((len(gt_bboxes), batch_maxlen)).long()
        for i in range(len(gt_bboxes)):
            if len_arr[i] == 0:
                continue

            target_bboxes[i, :len_arr[i], :] = gt_bboxes[i].clone()
            target_labels[i, :len_arr[i]] = gt_labels[i].clone()

        target_bboxes_num = torch.Tensor(len_arr).long()
        return target_bboxes, target_bboxes_num, target_labels

    def __get_object_list(self, batch_detections):
        """Convert decoded detections to plain Python lists.

        Each object becomes [xmin, ymin, xmax, ymax, cls_pred, conf] with the
        class index shifted down by 1 (background removed) and confidence
        rounded to two decimals.
        """
        batch_pred_bboxes = list()
        for idx, detections in enumerate(batch_detections):
            object_list = list()
            if detections is not None:
                for x1, y1, x2, y2, conf, cls_pred in detections:
                    xmin = x1.cpu().item()
                    ymin = y1.cpu().item()
                    xmax = x2.cpu().item()
                    ymax = y2.cpu().item()
                    cf = conf.cpu().item()
                    cls_pred = int(cls_pred.cpu().item()) - 1
                    object_list.append(
                        [xmin, ymin, xmax, ymax, cls_pred,
                         float('%.2f' % cf)])

            batch_pred_bboxes.append(object_list)

        return batch_pred_bboxes

    def train(self):
        """Run training epochs until max_epoch, optionally validating first."""
        cudnn.benchmark = True
        # Optionally validate the resumed checkpoint before training.
        if self.configer.get('network',
                             'resume') is not None and self.configer.get(
                                 'network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class FCNSegmentor(object):
    """
    FCN-style semantic-segmentation trainer: owns the network, data loaders,
    optimizer/scheduler, and the train/validation loops.

    NOTE(review): original docstring said "Pose Estimation" — copy-paste from a
    sibling class; this class clearly trains a semantic segmentor.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss meters shared by the train and val loops.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.seg_running_score = SegRunningScore(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_loss_manager = SegLossManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.optim_scheduler = OptimScheduler(configer)
        # Populated by _init_model().
        self.seg_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self._init_model()

    def _init_model(self):
        """Build the network, optimizer/scheduler, data loaders and loss."""
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = self.module_utilizer.load_net(self.seg_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.seg_data_loader.get_trainloader()
        self.val_loader = self.seg_data_loader.get_valloader()
        self.pixel_loss = self.seg_loss_manager.get_seg_loss('fcn_seg_loss')
        if self.configer.get('network', 'bn_type') == 'syncbn':
            # Sync-BN path computes the criterion in parallel across devices.
            self.pixel_loss = DataParallelCriterion(self.pixel_loss).cuda()

    def _get_parameters(self):
        """Split parameters into backbone ('lr_1') and head ('lr_10') groups.

        NOTE(review): both groups end up with the same learning rate — the
        head group uses ``base_lr * 1.0`` although its name suggests a 10x
        multiplier was intended; confirm against the training recipe.
        """
        lr_1 = []
        lr_10 = []
        params_dict = dict(self.seg_net.named_parameters())
        for key, value in params_dict.items():
            if 'backbone.' not in key:
                lr_10.append(value)
            else:
                lr_1.append(value)
        params = [{
            'params': lr_1,
            'lr': self.configer.get('lr', 'base_lr')
        }, {
            'params': lr_10,
            'lr': self.configer.get('lr', 'base_lr') * 1.0
        }]
        return params

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.seg_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.scheduler.step(self.configer.get('epoch'))
        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, targets = self.module_utilizer.to_device(inputs, targets)
            # Forward pass.
            outputs = self.seg_net(inputs)
            # Compute the loss of the train batch & backward.
            loss = self.pixel_loss(outputs, targets)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')
            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
            # Check to val the current model.
            if self.val_loader is not None and \
               self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()
        self.configer.plus_one('epoch')

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.seg_net.eval()
        start_time = time.time()
        for j, data_dict in enumerate(self.val_loader):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            with torch.no_grad():
                # Change the data type.
                inputs, targets = self.module_utilizer.to_device(
                    inputs, targets)
                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss = self.pixel_loss(outputs, targets)
                outputs = self.module_utilizer.gather(outputs)
            pred = outputs[0]
            self.val_losses.update(loss.item(), inputs.size(0))
            self.seg_running_score.update(
                pred.max(1)[1].cpu().numpy(), targets.cpu().numpy())
            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
        # Persist both the best-performance and best-val-loss checkpoints.
        self.configer.update_value(['performance'],
                                   self.seg_running_score.get_mean_iou())
        self.configer.update_value(['val_loss'], self.val_losses.avg)
        self.module_utilizer.save_net(self.seg_net, save_mode='performance')
        self.module_utilizer.save_net(self.seg_net, save_mode='val_loss')
        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        Log.info('Mean IOU: {}\n'.format(
            self.seg_running_score.get_mean_iou()))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_running_score.reset()
        self.seg_net.train()

    def train(self):
        """Entry point: optionally validate a resumed net, then train to max_epoch."""
        cudnn.benchmark = True
        if self.configer.get('network',
                             'resume') is not None and self.configer.get(
                                 'network', 'resume_val'):
            self.__val()
        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class RPNPoseTest(object):
    """OpenPose-style multi-person pose inference: runs the network at several
    scales, extracts keypoint peaks from heatmaps, scores limb candidates from
    Part Affinity Fields (PAFs), and greedily assembles per-person skeletons.
    """

    def __init__(self, configer):
        self.configer = configer
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

    def init_model(self):
        """Build the detector, load weights, and switch to eval mode."""
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Run the full pipeline on one image and write the visualization."""
        Log.info('Image Path: {}'.format(image_path))
        image_raw = ImageHelper.cv2_open_bgr(image_path)
        inputs = ImageHelper.bgr2rgb(image_raw)
        paf_avg, heatmap_avg = self.__get_paf_and_heatmap(inputs)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(
            image_raw, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k,
                                               all_peaks)
        subset, img_canvas = self.__draw_key_point(subset, all_peaks,
                                                   image_raw)
        img_canvas = self.__link_key_point(img_canvas, candidate, subset)
        cv2.imwrite(save_path, img_canvas)

    def __get_paf_and_heatmap(self, img_raw):
        """Multi-scale forward passes; returns PAF and heatmap averaged over
        scales, both resized back to the raw image resolution."""
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] /
            img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                            self.configer.get('network', 'paf_out')))
        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0),
                                  fx=scale,
                                  fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            # Pad to a multiple of the network stride; `pad` records the
            # right/bottom padding so it can be stripped from the outputs.
            img_test_pad, pad = PadImage(self.configer.get(
                'network', 'stride'))(img_test)
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                paf_out, heatmap_out = self.pose_net(img_test_pad)
            # extract outputs, resize, and remove padding
            heatmap = heatmap_out.data.squeeze().cpu().numpy().transpose(
                1, 2, 0)
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap = heatmap[:img_test_pad.size(2) - pad[3],
                              :img_test_pad.size(3) - pad[2], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)
            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            paf = cv2.resize(paf, (0, 0),
                             fx=self.configer.get('network', 'stride'),
                             fy=self.configer.get('network', 'stride'),
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad[3],
                      :img_test_pad.size(3) - pad[2], :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]),
                             interpolation=cv2.INTER_CUBIC)
            # Running average over the scale pyramid.
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)
        return paf_avg, heatmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Non-maximum suppression per keypoint channel.

        Returns, per part, a list of (x, y, score, global_id) tuples; ids are
        unique across all parts so they can index `candidate` later.
        """
        all_peaks = []
        peak_counter = 0
        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)
            # 4-neighbour local-maximum test on the smoothed map.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))
            peaks = zip(
                np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            # Score is read from the *unsmoothed* map.
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [
                peaks_with_score[i] + (ids[i], ) for i in range(len(ids))
            ]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)
        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
        """Score every candidate limb by integrating the PAF along the segment
        between its two endpoint peaks; keep greedy, mutually exclusive matches.

        Returns (special_k, connection_all): limbs with no candidates, and per
        limb an array of rows [idA, idB, score, i, j].
        """
        connection_all = []
        special_k = []
        mid_num = 10  # number of PAF samples taken along each segment
        for k in range(len(self.configer.get('details', 'limb_seq'))):
            score_mid = paf_avg[:, :, [k * 2, k * 2 + 1]]
            # self.pose_visualizer.vis_paf(score_mid, img_raw, name='pa{}'.format(k))
            candA = all_peaks[self.configer.get('details',
                                                'limb_seq')[k][0] - 1]
            candB = all_peaks[self.configer.get('details',
                                                'limb_seq')[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] +
                                         vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)
                        startend = zip(
                            np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)
                        vec_x = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 0]
                            for I in range(len(startend))
                        ])
                        vec_y = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 1]
                            for I in range(len(startend))
                        ])
                        # Dot product of the PAF with the limb direction.
                        score_midpts = np.multiply(
                            vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts)
                        # Penalize limbs longer than half the image height.
                        score_with_dist_prior += min(
                            0.5 * img_raw.shape[0] / norm - 1, 0)
                        num_positive = len(
                            np.nonzero(score_midpts > self.configer.get(
                                'vis', 'limb_threshold'))[0])
                        criterion1 = num_positive > int(
                            0.8 * len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append([
                                i, j, score_with_dist_prior,
                                score_with_dist_prior + candA[i][2] +
                                candB[j][2]
                            ])
                # Greedy matching: best-scoring connections first, each peak
                # used at most once per limb type.
                connection_candidate = sorted(connection_candidate,
                                              key=lambda x: x[2],
                                              reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack(
                            [connection,
                             [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break
                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])
        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, 20))
        candidate = np.array(
            [item for sublist in all_peaks for item in sublist])
        for k in range(len(self.configer.get('details', 'limb_seq'))):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(
                    self.configer.get('details', 'limb_seq')[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][
                                indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1
                    if found == 1:
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) +
                                      (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(
                            candidate[connection_all[k][i, :2].astype(int),
                                      2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])
        return subset, candidate

    def __draw_key_point(self, subset, all_peaks, img_raw):
        """Draw every detected peak on a copy of the raw image.

        NOTE(review): the low-confidence-person filter is disabled — the loop
        body is `pass` with the append commented out, so `del_ids` stays empty
        and np.delete removes nothing. Presumably intentional for debugging;
        confirm before re-enabling.
        """
        del_ids = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                #del_ids.append(i)
                pass
        subset = np.delete(subset, del_ids, axis=0)
        img_canvas = img_raw.copy()  # B,G,R order
        for i in range(self.configer.get('data', 'num_keypoints')):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'circle_radius'),
                           self.configer.get('details', 'color_list')[i],
                           thickness=-1)
        return subset, img_canvas

    def __link_key_point(self, img_canvas, candidate, subset):
        """Draw each person's limbs as semi-transparent filled ellipses."""
        for i in range(self.configer.get('data', 'num_keypoints') - 1):
            for n in range(len(subset)):
                index = subset[n][
                    np.array(self.configer.get('details', 'limb_seq')[i]) - 1]
                if -1 in index:
                    continue
                cur_canvas = img_canvas.copy()
                Y = candidate[index.astype(int), 0]
                X = candidate[index.astype(int), 1]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1])**2 + (Y[0] - Y[1])**2)**0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv2.ellipse2Poly(
                    (int(mY), int(mX)),
                    (int(length / 2), self.configer.get('vis', 'stick_width')),
                    int(angle), 0, 360, 1)
                cv2.fillConvexPoly(
                    cur_canvas, polygon,
                    self.configer.get('details', 'color_list')[i])
                # Blend so overlapping limbs stay visible.
                img_canvas = cv2.addWeighted(img_canvas, 0.4, cur_canvas, 0.6,
                                             0)
        return img_canvas

    def test(self):
        """Run inference on a single image or a directory (exactly one of
        'test_img' / 'test_dir' must be configured)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)
        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)
        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                if not os.path.exists(os.path.dirname(save_path)):
                    os.makedirs(os.path.dirname(save_path))
                self.__test_img(image_path, save_path)

    def __create_coco_submission(self, test_dir=None, base_dir=None):
        """Run the pipeline over COCO val2017 and dump a keypoints-results
        JSON in the COCO submission format.

        NOTE(review): the inner keypoint loop reuses `i`, shadowing the outer
        image-index `i` used only by print(); harmless today but fragile.
        """
        out_file = os.path.join(
            base_dir, 'person_keypoints_val2017_donny_results.json')
        out_list = list()
        coco = COCO(os.path.join(test_dir, 'person_keypoints_val2017.json'))
        for i, img_id in enumerate(list(coco.imgs.keys())):
            filename = coco.imgs[img_id]['file_name']
            image_raw = cv2.imread(os.path.join(test_dir, 'val2017',
                                                filename))
            print(i)
            paf_avg, heatmap_avg = self.__get_paf_and_heatmap(image_raw)
            all_peaks = self.__extract_heatmap_info(heatmap_avg)
            special_k, connection_all = self.__extract_paf_info(
                image_raw, paf_avg, all_peaks)
            subset, candidate = self.__get_subsets(connection_all, special_k,
                                                   all_peaks)
            subset, img_canvas = self.__draw_key_point(subset, all_peaks,
                                                       image_raw)
            img_canvas = self.__link_key_point(img_canvas, candidate, subset)
            cv2.imwrite(os.path.join(base_dir, filename), img_canvas)
            for n in range(len(subset)):
                dict_temp = dict()
                dict_temp['image_id'] = img_id
                dict_temp['category_id'] = 1
                dict_temp['score'] = subset[n][-2]
                pose_list = list()
                for i in range(
                        self.configer.get('data', 'num_keypoints') - 1):
                    index = subset[n][self.configer.get(
                        'details', 'coco_to_ours')[i]]
                    if index == -1:
                        pose_list.append(0)
                        pose_list.append(0)
                    else:
                        pose_list.append(candidate[index.astype(int)][0])
                        pose_list.append(candidate[index.astype(int)][1])
                    # Visibility flag for the (x, y, v) triplet.
                    pose_list.append(1)
                dict_temp['keypoints'] = pose_list
                out_list.append(dict_temp)
        fw = open(out_file, 'w')
        fw.write(json.dumps(out_list))
        fw.close()

    def create_submission(self):
        """Dispatch submission creation; only the 'coco' dataset is supported."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        test_dir = self.configer.get('test_dir')
        if self.configer.get('dataset') == 'coco':
            self.__create_coco_submission(test_dir, base_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending exactly one subdirectory level."""
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)
        return filename_list
class FCNSegmentor(object):
    """
    Legacy FCN semantic-segmentation trainer (iteration-driven, CityScapes only).

    Fixes applied in review:
    - ``.cuda(async=True)`` is a SyntaxError on Python >= 3.7 (``async`` is a
      reserved keyword); replaced with the equivalent ``non_blocking=True``.
    - ``loss.data[0]`` raises on 0-dim tensors under the torch >= 0.4 API used
      elsewhere in this file; replaced with ``loss.item()``.
    - ``Variable(..., volatile=True)`` is removed in torch >= 0.4; validation
      now runs under ``torch.no_grad()``, matching the file's other classes.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss meters shared by the train and val loops.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_loss_manager = SegLossManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        # Populated by init_model().
        self.seg_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        """Build the network, optimizer, data loaders and pixel loss."""
        self.seg_net = self.seg_model_manager.seg_net()
        self.iters = 0
        self.seg_net, _ = self.module_utilizer.load_net(self.seg_net)
        self.optimizer, self.lr = self.module_utilizer.update_optimizer(
            self.seg_net, self.iters)
        if self.configer.get('dataset') == 'cityscape':
            self.train_loader = self.seg_data_loader.get_trainloader(
                FSCityScapeLoader)
            self.val_loader = self.seg_data_loader.get_valloader(
                FSCityScapeLoader)
        else:
            Log.error('Dataset: {} is not valid!'.format(
                self.configer.get('dataset')))
            exit(1)
        self.pixel_loss = self.seg_loss_manager.get_seg_loss(
            'cross_entropy_loss')

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.seg_net.train()
        start_time = time.time()
        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)
            # non_blocking (formerly `async`, a keyword since Python 3.7)
            # overlaps the H2D copy with compute when pinned memory is used.
            inputs = data_tuple[0].cuda(non_blocking=True)
            targets = data_tuple[1].cuda(non_blocking=True)
            # Forward pass.
            outputs = self.seg_net(inputs)
            # Compute the loss of the train batch & backward.
            loss_pixel = self.pixel_loss(outputs, targets)
            loss = loss_pixel
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1
            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        self.iters,
                        self.configer.get('solver', 'display_iter'), self.lr,
                        batch_time=self.batch_time,
                        data_time=self.data_time,
                        loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()
            # Adjust the learning rate after every iteration.
            self.optimizer, self.lr = self.module_utilizer.update_optimizer(
                self.seg_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.seg_net.eval()
        start_time = time.time()
        with torch.no_grad():  # replaces the removed `volatile=True` flag
            for j, data_tuple in enumerate(self.val_loader):
                # Change the data type.
                inputs = data_tuple[0].cuda(non_blocking=True)
                targets = data_tuple[1].cuda(non_blocking=True)
                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss_pixel = self.pixel_loss(outputs, targets)
                loss = loss_pixel
                self.val_losses.update(loss.item(), inputs.size(0))
                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()
        self.module_utilizer.save_net(self.seg_net, self.iters)
        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.seg_net.train()

    def train(self):
        """Entry point: iterate epochs until max_iter iterations are reached."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
class ConvPoseMachine(object):
    """
    Convolutional Pose Machine trainer (iteration-driven heatmap regression).

    Fixes applied in review:
    - ``.cuda(async=True)`` is a SyntaxError on Python >= 3.7 (``async`` is a
      reserved keyword); replaced with the equivalent ``non_blocking=True``.
    - ``loss.data[0]`` raises on 0-dim tensors under the torch >= 0.4 API used
      elsewhere in this file; replaced with ``loss.item()``.
    - ``Variable(..., volatile=True)`` is removed in torch >= 0.4; validation
      now runs under ``torch.no_grad()``, matching the file's other classes.

    NOTE(review): ``self.lr`` is logged in __train but never assigned —
    init_model only unpacks the optimizer from update_optimizer — so the log
    prints ``Learning rate = None``. Left unchanged; confirm the intended
    return signature of ModuleUtilizer.update_optimizer.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss meters shared by the train and val loops.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.train_utilizer = ModuleUtilizer(configer)
        # Populated by init_model().
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.best_model_loss = None
        self.is_best = None
        self.lr = None
        self.iters = None

    def init_model(self, train_loader=None, val_loader=None):
        """Build the detector, restore weights/iteration, set up loaders."""
        self.pose_net = self.model_manager.pose_detector()
        self.pose_net, self.iters = self.train_utilizer.load_net(self.pose_net)
        self.optimizer = self.train_utilizer.update_optimizer(
            self.pose_net, self.iters)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.heatmap_loss = self.loss_manager.get_pose_loss('heatmap_loss')

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()
        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)
            # non_blocking (formerly `async`, a keyword since Python 3.7)
            # overlaps the H2D copy with compute when pinned memory is used.
            inputs = data_tuple[0].cuda(non_blocking=True)
            heatmap = data_tuple[1].cuda(non_blocking=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = data_tuple[2].cuda(non_blocking=True)
            # Debug visualizations of targets and (de-normalized) inputs.
            self.pose_visualizer.vis_tensor(heatmap, name='heatmap')
            self.pose_visualizer.vis_tensor((inputs * 256 + 128) / 255,
                                            name='image')
            # Forward pass.
            outputs = self.pose_net(inputs)
            self.pose_visualizer.vis_tensor(outputs, name='output')
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1
            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        self.iters,
                        self.configer.get('solver', 'display_iter'), self.lr,
                        batch_time=self.batch_time,
                        data_time=self.data_time,
                        loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()
            # Adjust the learning rate after every iteration.
            self.optimizer = self.train_utilizer.update_optimizer(
                self.pose_net, self.iters)

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()
        with torch.no_grad():  # replaces the removed `volatile=True` flag
            for j, data_tuple in enumerate(self.val_loader):
                # Change the data type.
                inputs = data_tuple[0].cuda(non_blocking=True)
                heatmap = data_tuple[1].cuda(non_blocking=True)
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = data_tuple[2].cuda(non_blocking=True)
                # Forward pass.
                outputs = self.pose_net(inputs)
                self.pose_visualizer.vis_peaks(inputs, outputs,
                                               name='peak_val')
                # Compute the loss of the val batch.
                loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
                loss = loss_heatmap
                self.val_losses.update(loss.item(), inputs.size(0))
                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()
        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Entry point: iterate epochs until max_iter iterations are reached."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break

    def test(self, img_path=None, img_dir=None):
        # NOTE(review): unfinished stub — loads the image and does nothing
        # with it; img_dir is ignored. Kept as-is pending implementation.
        if img_path is not None and os.path.exists(img_path):
            image = Image.open(img_path).convert('RGB')
class SingleShotDetector(object):
    """
    The class for Single Shot Detector. Include train, val, test & predict.

    Fix applied in review: __val saved the checkpoint with
    ``save_net(..., metric='iters')`` while both other save_net call sites in
    this file use the ``save_mode`` keyword (``save_mode='iters'``,
    ``save_mode='performance'``); changed to ``save_mode='iters'`` for a
    consistent ModuleUtilizer API.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss meters shared by the train and val loops.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.det_visualizer = DetVisualizer(configer)
        self.det_loss_manager = DetLossManager(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        # Populated by init_model().
        self.det_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

    def init_model(self):
        """Build the detector, optimizer/scheduler, loaders and multibox loss."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.det_data_loader.get_trainloader()
        self.val_loader = self.det_data_loader.get_valloader()
        self.det_loss = self.det_loss_manager.get_det_loss('multibox_loss')

    def _get_parameters(self):
        """Single parameter group: all network parameters."""
        return self.det_net.parameters()

    def __train(self):
        """
          Train function of every epoch during train phase.
        """
        # Validate the resumed checkpoint once before any training step.
        if self.configer.get(
                'network',
                'resume') is not None and self.configer.get('iters') == 0:
            self.__val()
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))
        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, (inputs, bboxes, labels) in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, bboxes, labels = self.module_utilizer.to_device(
                inputs, bboxes, labels)
            # Forward pass.
            loc, cls = self.det_net(inputs)
            # Compute the loss of the train batch & backward.
            loss = self.det_loss(loc, bboxes, cls, labels)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')
            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()
            # Check to val the current model.
            if self.val_loader is not None and \
               (self.configer.get('iters')) % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """
          Validation function during the train phase.
        """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, (inputs, bboxes, labels) in enumerate(self.val_loader):
                # Change the data type.
                inputs, bboxes, labels = self.module_utilizer.to_device(
                    inputs, bboxes, labels)
                # Forward pass.
                loc, cls = self.det_net(inputs)
                # Compute the loss of the val batch.
                loss = self.det_loss(loc, bboxes, cls, labels)
                self.val_losses.update(loss.item(), inputs.size(0))
                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()
        # Consistent with the other save_net call sites in this file
        # (save_mode='iters' / save_mode='performance'); was metric='iters'.
        self.module_utilizer.save_net(self.det_net, save_mode='iters')
        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.det_net.train()

    def train(self):
        """Entry point: train until max_epoch epochs are reached."""
        cudnn.benchmark = True
        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class ConvPoseMachineTest(object):
    """Single-person pose tester for Convolutional Pose Machines.

    Runs a trained CPM network over test images at several scales,
    averages the heatmaps, extracts per-keypoint peaks and draws them.
    """

    def __init__(self, configer):
        self.configer = configer
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None

    def init_model(self):
        """Build the detector, load trained weights and switch to eval mode."""
        self.pose_net = self.pose_model_manager.pose_detector()
        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Run the full pipeline on one image and save the visualization."""
        image_raw = cv2.imread(image_path)
        heatmap_avg = self.__get_heatmap(image_raw)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_save = self.__draw_key_point(all_peaks, image_raw)
        cv2.imwrite(save_path, image_save)

    def __get_heatmap(self, img_raw):
        """Compute the scale-averaged heatmap for one BGR image.

        Returns an (H, W, heatmap_out) array at the original resolution.
        """
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] / img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]
        # BUG FIX: np.zeros takes a single shape tuple; the original passed
        # three positional arguments (the second being interpreted as dtype).
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            img_test_pad, pad = PadImage(
                self.configer.get('network', 'stride'), 0)(img_test)
            # Remember the padded spatial size *before* the array is
            # transposed to NCHW; the original read shape[0]/shape[1] after
            # the transpose, cropping against (1, 3, ...) instead of (H, W).
            pad_h, pad_w = img_test_pad.shape[0], img_test_pad.shape[1]
            img_test_pad = np.transpose(
                np.float32(img_test_pad[:, :, :, np.newaxis]),
                (3, 2, 0, 1)) / 256 - 0.5
            feed = Variable(torch.from_numpy(img_test_pad)).cuda()
            heatmap = self.pose_net(feed)

            # Extract outputs, resize, and remove padding.
            # BUG FIX: torch.Tensor.transpose() only swaps two dims; convert
            # to numpy first for the (C, H, W) -> (H, W, C) permutation.
            heatmap = heatmap.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            # pad[2] is the right pad and pad[3] the bottom pad (see the
            # usage in CapsulePoseTest), so strip pad[3] rows and pad[2]
            # columns; the original had the two indices swapped.
            heatmap = heatmap[:pad_h - pad[3], :pad_w - pad[2], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)

        return heatmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Find local-maximum peaks for each keypoint channel.

        Returns a list (one entry per keypoint) of (x, y, score) tuples.
        """
        all_peaks = []
        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # Shifted copies of the map for 4-neighbour comparison.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            # A peak is >= all four neighbours and above the part threshold.
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(np.nonzero(peaks_binary)[1],
                        np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            all_peaks.append(peaks_with_score)

        return all_peaks

    def __draw_key_point(self, all_peaks, img_raw):
        """Draw every detected peak as a filled circle on a copy of img_raw."""
        img_canvas = img_raw.copy()  # B,G,R order
        for i in range(self.configer.get('data', 'num_keypoints')):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'stick_width'),
                           self.configer.get('details', 'color_list')[i],
                           thickness=-1)

        return img_canvas

    def test(self, test_img=None, test_dir=None):
        """Run inference on one image or on every image in a directory."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'), 'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                self.__test_img(image_path, save_path)

    def __create_coco_submission(self, test_dir=None, base_dir=None):
        # Not implemented yet.
        pass

    def create_submission(self, test_dir=None):
        """Create a submission directory for the configured dataset."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if self.configer.get('dataset') == 'coco':
            self.__create_coco_submission(test_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending one level into subdirectories."""
        filename_list = list()
        for item in os.listdir(dir_name):
            # BUG FIX: test the full path; os.path.isdir(item) checked a path
            # relative to the CWD, so subdirectories were never descended.
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class FCNSegmentorTest(object):
    """Semantic-segmentation tester for FCN-style networks.

    Runs a trained network on single images or directories and writes
    color-coded predictions (plus a label-id submission for Cityscapes).
    """

    def __init__(self, configer):
        self.configer = configer
        self.seg_vis = SegVisualizer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.seg_net = None

    def init_model(self):
        """Build the network, load trained weights and switch to eval mode."""
        self.seg_net = self.seg_model_manager.seg_net()
        self.seg_net, _ = self.module_utilizer.load_net(self.seg_net)
        self.seg_net.eval()

    def forward(self, image_path):
        """Return the per-pixel argmax class map for one image."""
        image = Image.open(image_path).convert('RGB')
        image = RandomResize(size=self.configer.get('data', 'input_size'),
                             is_base=False)(image)
        image = ToTensor()(image)
        image = Normalize(mean=[128.0, 128.0, 128.0],
                          std=[256.0, 256.0, 256.0])(image)
        # FIX: `volatile=True` was removed in PyTorch 0.4; use no_grad() for
        # inference, matching the other test classes in this file.
        with torch.no_grad():
            inputs = image.unsqueeze(0).cuda()
            results = self.seg_net.forward(inputs)

        return results.data.cpu().numpy().argmax(axis=1)[0].squeeze()

    def __test_img(self, image_path, save_path):
        """Dispatch to the dataset-specific single-image test."""
        if self.configer.get('dataset') == 'cityscape':
            self.__test_cityscape_img(image_path, save_path)
        elif self.configer.get('dataset') == 'laneline':
            self.__test_laneline_img(image_path, save_path)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __test_cityscape_img(self, img_path, save_path):
        """Render the 19-class Cityscapes prediction as an RGB image."""
        # Cityscapes train-id color palette.
        color_list = [(128, 64, 128), (244, 35, 232), (70, 70, 70),
                      (102, 102, 156), (190, 153, 153), (153, 153, 153),
                      (250, 170, 30), (220, 220, 0), (107, 142, 35),
                      (152, 251, 152), (70, 130, 180), (220, 20, 60),
                      (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
                      (0, 80, 100), (0, 0, 230), (119, 11, 32)]
        result = self.forward(img_path)
        # Prediction comes out at 1/stride of the network input resolution.
        width = self.configer.get('data', 'input_size')[0] // self.configer.get('network', 'stride')
        height = self.configer.get('data', 'input_size')[1] // self.configer.get('network', 'stride')
        color_dst = np.zeros((height, width, 3), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            color_dst[result == i] = color_list[i]

        color_img = np.array(color_dst, dtype=np.uint8)
        color_img = Image.fromarray(color_img, 'RGB')
        color_img.save(save_path)

    def __test_laneline_img(self, img_path, save_path):
        # Not implemented yet.
        pass

    def test(self):
        """Run inference on `test_img` or every image under `test_dir`."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg',
                                self.configer.get('dataset'), 'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                self.__test_img(image_path, save_path)

    def __create_cityscape_submission(self, test_dir=None, base_dir=None):
        """Write label-id images in the Cityscapes submission format."""
        # Mapping from train id to official Cityscapes label id.
        label_list = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26,
                      27, 28, 31, 32, 33]
        for filename in self.__list_dir(test_dir):
            image_path = os.path.join(test_dir, filename)
            save_path = os.path.join(base_dir, filename)
            result = self.forward(image_path)
            width = self.configer.get('data', 'input_size')[0] // self.configer.get('network', 'stride')
            height = self.configer.get('data', 'input_size')[1] // self.configer.get('network', 'stride')
            # 255 marks pixels with no assigned class.
            label_dst = np.ones((height, width), dtype=np.uint8) * 255
            for i in range(self.configer.get('data', 'num_classes')):
                label_dst[result == i] = label_list[i]

            label_img = np.array(label_dst, dtype=np.uint8)
            label_img = Image.fromarray(label_img, 'P')
            label_img.save(save_path)

    def create_submission(self, test_dir=None):
        """Create a submission directory for the configured dataset."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg',
                                self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if self.configer.get('dataset') == 'cityscape':
            self.__create_cityscape_submission(test_dir, base_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending one level into subdirectories."""
        filename_list = list()
        for item in os.listdir(dir_name):
            # BUG FIX: check the full path; os.path.isdir(item) was relative
            # to the current working directory, so subdirectories were never
            # descended into.
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class SingleShotDetectorTest(object):
    """SSD object-detection tester.

    Decodes the network's loc/conf output against the prior (default) boxes,
    applies NMS, and writes visualization images plus JSON detections.
    """

    def __init__(self, configer):
        self.configer = configer
        self.det_visualizer = DetVisualizer(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        # Anchor boxes in (cx, cy, w, h) form used by __decode.
        self.default_boxes = PriorBoxLayer(configer)()
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

    def init_model(self):
        # Build the detector, load trained weights and switch to eval mode.
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, save_path):
        """Detect objects in one image; save the visualization and a JSON dump."""
        Log.info('Image Path: {}'.format(image_path))
        image_raw = ImageHelper.cv2_open_bgr(image_path)
        inputs = ImageHelper.bgr2rgb(image_raw)
        inputs = ImageHelper.resize(inputs,
                                    tuple(self.configer.get('data', 'input_size')),
                                    Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)
        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            bbox, cls = self.det_net(inputs)

        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores, has_obj = self.__decode(bbox, cls)
        if has_obj:
            boxes = boxes.cpu().numpy()
            boxes = np.clip(boxes, 0, 1)
            lbls = lbls.cpu().numpy()
            scores = scores.cpu().numpy()
            img_canvas = self.__draw_box(image_raw, boxes, lbls, scores)
        else:
            # print('None obj detected!')
            img_canvas = image_raw

        Log.info('Save Path: {}'.format(save_path))
        cv2.imwrite(save_path, img_canvas)
        # Boxes is within 0-1.
        # NOTE(review): when has_obj is False, __decode returned the ints
        # (0, 0, 0) and __save_json will raise TypeError trying to zip them —
        # this call probably belongs inside the has_obj branch.
        self.__save_json(save_path, boxes, lbls, scores, image_raw)
        return image_raw, lbls, scores, boxes, has_obj

    def __draw_box(self, img_raw, box_list, label_list, conf):
        """Draw labelled boxes (normalized coords) above the confidence threshold."""
        img_canvas = img_raw.copy()
        img_shape = img_canvas.shape
        for bbox, label, cf in zip(box_list, label_list, conf):
            if cf < self.configer.get('vis', 'conf_threshold'):
                continue

            # Boxes are normalized to [0, 1]; scale to pixel coordinates.
            xmin = int(bbox[0] * img_shape[1])
            xmax = int(bbox[2] * img_shape[1])
            ymin = int(bbox[1] * img_shape[0])
            ymax = int(bbox[3] * img_shape[0])
            # Label 0 is background, so name/color lists are indexed by label-1.
            class_name = self.configer.get('details', 'name_seq')[label - 1] + str(cf)
            c = self.configer.get('details', 'color_list')[label - 1]
            cv2.rectangle(img_canvas, (xmin, ymin), (xmax, ymax), color=c, thickness=3)
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img_canvas, class_name, (xmin + 5, ymax - 5), font,
                        fontScale=0.5, color=c, thickness=2)

        return img_canvas

    def __nms(self, bboxes, scores, mode='union'):
        """Non maximum suppression.

        Args:
          bboxes(tensor): bounding boxes, sized [N,4].
          scores(tensor): bbox scores, sized [N,].
          mode(str): 'union' or 'min'.

        Returns:
          keep(tensor): selected indices.

        Ref:
          https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py
        """
        # NOTE(review): the `mode` parameter is effectively ignored — the
        # comparisons below read self.configer.get('nms', 'mode') instead;
        # only the error message uses `mode`. Confirm which is intended.
        x1 = bboxes[:, 0]
        y1 = bboxes[:, 1]
        x2 = bboxes[:, 2]
        y2 = bboxes[:, 3]
        areas = (x2 - x1) * (y2 - y1)
        _, order = scores.sort(0, descending=True)
        keep = []
        while order.numel() > 0:
            # Keep the highest-scoring remaining box.
            i = order[0]
            keep.append(i)
            if order.numel() == 1:
                break

            # Intersection of the kept box with every remaining box.
            xx1 = x1[order[1:]].clamp(min=x1[i])
            yy1 = y1[order[1:]].clamp(min=y1[i])
            xx2 = x2[order[1:]].clamp(max=x2[i])
            yy2 = y2[order[1:]].clamp(max=y2[i])
            w = (xx2-xx1).clamp(min=0)
            h = (yy2-yy1).clamp(min=0)
            inter = w*h
            if self.configer.get('nms', 'mode') == 'union':
                ovr = inter / (areas[i] + areas[order[1:]] - inter)
            elif self.configer.get('nms', 'mode') == 'min':
                ovr = inter / areas[order[1:]].clamp(max=areas[i])
            else:
                raise TypeError('Unknown nms mode: %s.' % mode)

            # Drop boxes overlapping the kept one beyond the threshold.
            ids = (ovr <= self.configer.get('nms', 'overlap_threshold')).nonzero().squeeze()
            if ids.numel() == 0:
                break

            order = order[ids + 1]

        return torch.LongTensor(keep)

    def __decode(self, loc, conf):
        """Transform predicted loc/conf back to real bbox locations and class labels.

        Args:
          loc: (tensor) predicted loc, sized [8732, 4].
          conf: (tensor) predicted conf, sized [8732, 21].

        Returns:
          boxes: (tensor) bbox locations, sized [#obj, 4].
          labels: (tensor) class labels, sized [#obj,1].
        """
        has_obj = False
        # SSD variance terms used when the regression targets were encoded.
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, 2:] * variances[1]) * self.default_boxes[:, 2:]
        cxcy = loc[:, :2] * variances[0] * self.default_boxes[:, 2:] + self.default_boxes[:, :2]
        # Convert from center-size to corner form.
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 1)  # [8732,4]
        max_conf, labels = conf.max(1)  # [8732,1]
        # Anchors whose argmax class is non-background (label 0).
        ids = labels.nonzero()
        tmp = ids.cpu().numpy()
        if tmp.__len__() > 0:
            # print('detected %d objs' % tmp.__len__())
            ids = ids.squeeze(1)  # [#boxes,]
            has_obj = True
        else:
            print('None obj detected!')
            # NOTE(review): these integer placeholders are not iterable; see
            # the matching note in __test_img.
            return 0, 0, 0, has_obj

        keep = self.__nms(boxes[ids], max_conf[ids])
        return boxes[ids][keep], labels[ids][keep], max_conf[ids][keep], has_obj

    def __save_json(self, save_path, box_list, label_list, conf, image_raw):
        """Write detections (pixel coords) next to the image as <name>.json."""
        # NOTE(review): the second format() argument is unused — the intent
        # appears to be replacing the image extension with ".json".
        file_name = '{}.json'.format(save_path[:-4], ".json")
        json_file_stream = open(file_name, 'w')
        size = image_raw.shape
        json_dict = dict()
        object_list = list()
        for bbox, label, cf in zip(box_list, label_list, conf):
            if cf < self.configer.get('vis', 'conf_threshold'):
                continue

            object_dict = dict()
            xmin = bbox[0] * size[1]
            xmax = bbox[2] * size[1]
            ymin = bbox[1] * size[0]
            ymax = bbox[3] * size[0]
            object_dict['bbox'] = [xmin, xmax, ymin, ymax]
            object_dict['label'] = label - 1
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        json_file_stream.write(json.dumps(json_dict))
        json_file_stream.close()

    def test(self):
        """Entry point: run detection on `test_img` or every file in `test_dir`."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                if not os.path.exists(os.path.dirname(save_path)):
                    os.makedirs(os.path.dirname(save_path))

                self.__test_img(image_path, save_path)

    def debug(self):
        """Round-trip check: decode the encoded ground truth and draw it."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.det_data_loader.get_valloader(SSDDataLoader)
        count = 0
        for i, (inputs, bboxes, labels) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                # Only visualize the first 20 samples.
                if count > 20:
                    exit(1)

                # Undo the input normalization to recover a displayable image.
                ori_img = DeNormalize(mean=self.configer.get('trans_params', 'mean'),
                                      std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                # One-hot encode the labels so they look like class scores.
                eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
                labels_target = eye_matrix[labels.view(-1)].view(
                    inputs.size(0), -1, self.configer.get('data', 'num_classes'))
                boxes, lbls, scores, has_obj = self.__decode(bboxes[j], labels_target[j])
                if has_obj:
                    boxes = boxes.cpu().numpy()
                    boxes = np.clip(boxes, 0, 1)
                    lbls = lbls.cpu().numpy()
                    scores = scores.cpu().numpy()
                    img_canvas = self.__draw_box(image_bgr, boxes, lbls, scores)
                else:
                    # print('None obj detected!')
                    img_canvas = image_bgr

                # self.det_visualizer.vis_bboxes(paf_avg, image_rgb.astype(np.uint8), name='314{}_{}'.format(i,j))
                cv2.imwrite(os.path.join(base_dir, '{}_{}_result.jpg'.format(i, j)), img_canvas)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending one level into subdirectories."""
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class OpenPoseTest(object):
    """Tester for the OpenPose multi-person pose estimator.

    Runs the network at several scales, averages heatmaps/PAFs, groups the
    detected keypoints into persons, and saves visualizations plus a JSON
    result per image.
    """

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_parser = PoseParser(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.paf_generator = PafGenerator(configer)
        self.data_transformer = DataTransformer(configer)
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None
        self._init_model()

    def _init_model(self):
        # Build the detector, load trained weights and switch to eval mode.
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Run the full multi-scale pipeline on one image and save results."""
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(
            ori_image, mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros(
            (ori_height, ori_width, self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros(
            (ori_height, ori_width, self.configer.get('network', 'paf_out')))
        multiplier = [
            scale * self.configer.get('test', 'input_size')[0] / ori_width
            for scale in self.configer.get('test', 'scale_search')
        ]
        stride = self.configer.get('network', 'stride')
        for i, scale in enumerate(multiplier):
            # Round the scaled size up to a multiple of the network stride.
            target_size = [
                math.ceil((ori_width * scale) / stride) * stride,
                math.ceil((ori_height * scale) / stride) * stride
            ]
            image = self.blob_helper.make_input(ori_image,
                                                input_size=target_size,
                                                scale=1.0)
            with torch.no_grad():
                # The network returns per-stage lists; keep the last stage.
                paf_out_list, heatmap_out_list = self.pose_net(image)
                paf_out = paf_out_list[-1]
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height),
                                     interpolation=cv2.INTER_CUBIC)
                paf = paf_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                paf = cv2.resize(paf, (ori_width, ori_height),
                                 interpolation=cv2.INTER_CUBIC)
                # Average predictions over all search scales.
                heatmap_avg = heatmap_avg + heatmap / len(multiplier)
                paf_avg = paf_avg + paf / len(multiplier)

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(
            ori_img_bgr, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k,
                                               all_peaks)
        json_dict = self.__get_info_tree(ori_img_bgr, subset, candidate)
        image_canvas = self.pose_parser.draw_points(ori_img_bgr.copy(),
                                                    json_dict)
        image_canvas = self.pose_parser.link_points(image_canvas, json_dict)
        ImageHelper.save(image_canvas, vis_path)
        ImageHelper.save(ori_img_bgr, raw_path)
        Log.info('Json Save Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)

    def __get_info_tree(self, image_raw, subset, candidate):
        """Convert the grouped subsets into a serializable result dict."""
        json_dict = dict()
        height, width, _ = image_raw.shape
        json_dict['image_height'] = height
        json_dict['image_width'] = width
        object_list = list()
        for n in range(len(subset)):
            # Drop persons with too few joints or a low average joint score.
            if subset[n][-1] < self.configer.get('vis', 'num_threshold'):
                continue

            if subset[n][-2] / subset[n][-1] < self.configer.get(
                    'vis', 'avg_threshold'):
                continue

            object_dict = dict()
            object_dict['kpts'] = np.zeros(
                (self.configer.get('data', 'num_kpts'), 3)).tolist()
            for j in range(self.configer.get('data', 'num_kpts')):
                index = subset[n][j]
                if index == -1:
                    # Joint j was not detected for this person.
                    object_dict['kpts'][j][0] = -1
                    object_dict['kpts'][j][1] = -1
                    object_dict['kpts'][j][2] = -1
                else:
                    object_dict['kpts'][j][0] = candidate[index.astype(int)][0]
                    object_dict['kpts'][j][1] = candidate[index.astype(int)][1]
                    object_dict['kpts'][j][2] = 1

            object_dict['score'] = subset[n][-2]
            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def __extract_heatmap_info(self, heatmap_avg):
        """Find keypoint peaks per channel as (x, y, score, global_id) tuples."""
        all_peaks = []
        peak_counter = 0
        for part in range(self.configer.get('data', 'num_kpts')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # Shifted copies of the map for 4-neighbour comparison.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            # A peak is >= all four neighbours and above the part threshold.
            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(
                np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            # Suppress near-duplicates: drop any peak within 6px (Chebyshev
            # distance) of an earlier kept peak.
            del_flag = [0 for i in range(len(peaks))]
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    for j in range(i + 1, len(peaks)):
                        if max(abs(peaks[i][0] - peaks[j][0]),
                               abs(peaks[i][1] - peaks[j][1])) <= 6:
                            del_flag[j] = 1

            new_peaks = list()
            for i in range(len(peaks)):
                if del_flag[i] == 0:
                    new_peaks.append(peaks[i])

            peaks = new_peaks
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            # Assign globally unique ids so peaks can later be referenced by
            # index into the flat candidate array.
            ids = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [
                peaks_with_score[i] + (ids[i], ) for i in range(len(ids))
            ]
            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
        """Score candidate limb connections by integrating PAFs along them.

        Returns (special_k, connection_all): limb indices that had no
        candidate endpoints, and per-limb arrays of rows
        [peak_id_A, peak_id_B, score, i, j].
        """
        connection_all = []
        special_k = []
        mid_num = self.configer.get('vis', 'mid_point_num')
        for k in range(len(self.configer.get('details', 'limb_seq'))):
            # The two PAF channels (x, y components) for limb k.
            score_mid = paf_avg[:, :, [k * 2, k * 2 + 1]]
            # Candidate endpoint peaks for this limb (limb_seq is 1-based).
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] - 1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] - 1]
            nA = len(candA)
            nB = len(candB)
            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        # Unit vector from A to B (epsilon avoids div-by-0).
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)
                        # Sample mid_num points along the A->B segment.
                        startend = zip(
                            np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)
                        vec_x = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 0]
                            for I in range(len(startend))
                        ])
                        vec_y = np.array([
                            score_mid[int(round(startend[I][1])),
                                      int(round(startend[I][0])), 1]
                            for I in range(len(startend))
                        ])
                        # Dot product of the sampled PAF with the limb direction.
                        score_midpts = np.multiply(
                            vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(
                            score_midpts)
                        # Penalize limbs longer than half the image height.
                        score_with_dist_prior += min(
                            0.5 * img_raw.shape[0] / norm - 1, 0)
                        num_positive = len(
                            np.nonzero(score_midpts > self.configer.get(
                                'vis', 'limb_threshold'))[0])
                        # Accept only if enough sampled points agree with the
                        # limb direction and the overall score is positive.
                        criterion1 = num_positive > int(
                            self.configer.get('vis', 'limb_pos_ratio') *
                            len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append([
                                i, j, score_with_dist_prior,
                                score_with_dist_prior + candA[i][2] + candB[j][2]
                            ])

                # Greedy assignment: best-scoring connections first, with
                # each endpoint peak used at most once.
                connection_candidate = sorted(connection_candidate,
                                              key=lambda x: x[2],
                                              reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack(
                            [connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        """Group limb connections into per-person keypoint subsets."""
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, self.configer.get('data', 'num_kpts') + 2))
        # Flat array of all peaks, indexable by the global peak id.
        candidate = np.array(
            [item for sublist in all_peaks for item in sublist])
        for k in self.configer.get('details', 'mini_tree'):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(
                    self.configer.get('details', 'limb_seq')[k]) - 1
                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][
                                indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        # Extend the existing person with endpoint B.
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) +
                                      (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[
                                partBs[i].astype(int),
                                2] + connection_all[k][i][2]
                    # if find no partA in the subset, create a new subset
                    elif not found:
                        row = -1 * np.ones(
                            self.configer.get('data', 'num_kpts') + 2)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(
                            candidate[connection_all[k][i, :2].astype(int),
                                      2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def test(self):
        """Entry point: run on `test_img` or every file under `test_dir`."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            img_count = 0
            for filename in FileHelper.list_dir(test_dir):
                img_count += 1
                # Hard cap to keep directory runs bounded.
                if img_count > 1200:
                    break

                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Visualize ground-truth heatmaps/PAFs and run the grouping
        pipeline on a few training batches."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
            inputs = data_dict['img']
            maskmap = data_dict['maskmap']
            input_size = [inputs.size(3), inputs.size(2)]
            heatmap = self.heatmap_generator(data_dict['kpts'], input_size,
                                             maskmap=maskmap)
            vecmap = self.paf_generator(data_dict['kpts'], input_size,
                                        maskmap=maskmap)
            for j in range(inputs.size(0)):
                count = count + 1
                # Only visualize the first 10 samples.
                if count > 10:
                    exit(1)

                Log.info(heatmap.size())
                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                mask_canvas = maskmap[j].repeat(3, 1, 1).numpy().transpose(1, 2, 0)
                mask_canvas = (mask_canvas * 255).astype(np.uint8)
                # Upsample label maps from 1/stride resolution to input size.
                mask_canvas = cv2.resize(
                    mask_canvas, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                image_bgr = cv2.addWeighted(image_bgr, 0.6, mask_canvas, 0.4, 0)
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                heatmap_avg = cv2.resize(
                    heatmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                paf_avg = vecmap[j].numpy().transpose(1, 2, 0)
                paf_avg = cv2.resize(paf_avg, (0, 0),
                                     fx=self.configer.get('network', 'stride'),
                                     fy=self.configer.get('network', 'stride'),
                                     interpolation=cv2.INTER_CUBIC)
                self.pose_visualizer.vis_peaks(heatmap_avg, image_bgr)
                self.pose_visualizer.vis_paf(paf_avg, image_bgr)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                special_k, connection_all = self.__extract_paf_info(
                    image_bgr, paf_avg, all_peaks)
                subset, candidate = self.__get_subsets(connection_all,
                                                       special_k, all_peaks)
                json_dict = self.__get_info_tree(image_bgr, subset, candidate)
                image_canvas = self.pose_parser.draw_points(
                    image_bgr, json_dict)
                image_canvas = self.pose_parser.link_points(
                    image_canvas, json_dict)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class OpenPose(object):
    """Trainer for the OpenPose multi-person pose estimator.

    Runs the train/val loops, logs progress and checkpoints the network.
    """

    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        """Build the network, loaders, optimizer and loss."""
        self.pose_net = self.model_manager.pose_detector()
        self.iters = 0
        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)
        self.optimizer, self.lr = self.module_utilizer.update_optimizer(
            self.pose_net, self.iters)

        if self.configer.get('dataset') == 'coco':
            self.train_loader = self.data_loader.get_trainloader(OPCocoLoader)
            self.val_loader = self.data_loader.get_valloader(OPCocoLoader)
        else:
            Log.error('Dataset: {} is not valid!'.format(
                self.configer.get('dataset')))
            exit(1)

        self.mse_loss = self.loss_manager.get_pose_loss('mse_loss')

    def __train(self):
        """Train function of every epoch during train phase."""
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)

            # BUG FIX: `.cuda(async=True)` is a SyntaxError on Python 3.7+
            # (`async` became a keyword); PyTorch 0.4 renamed the kwarg to
            # `non_blocking`.
            inputs = Variable(data_tuple[0].cuda(non_blocking=True))
            heatmap = Variable(data_tuple[1].cuda(non_blocking=True))
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(non_blocking=True))

            # Forward pass.
            paf_out, heatmap_out = self.pose_net(inputs)
            self.vis.vis_paf(paf_out,
                             inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0),
                             name='paf_out')

            # Compute the loss of the train batch & backward.
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
            loss = loss_heatmap
            if len(data_tuple) > 3:
                # The PAF (part-affinity-field) target is optional.
                vecmap = Variable(data_tuple[3].cuda(non_blocking=True))
                self.vis.vis_paf(vecmap,
                                 inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0),
                                 name='paf')
                loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                loss += loss_associate

            # BUG FIX: indexing a 0-dim tensor (`loss.data[0]`) was removed
            # in PyTorch 0.5; use `.item()` as elsewhere in this file.
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                             self.iters,
                             self.configer.get('solver', 'display_iter'),
                             self.lr, batch_time=self.batch_time,
                             data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            # Adjust the learning rate after every iteration.
            self.optimizer, self.lr = self.module_utilizer.update_optimizer(
                self.pose_net, self.iters)

    def __val(self):
        """Validation function during the train phase."""
        self.pose_net.eval()
        start_time = time.time()
        # BUG FIX: `volatile=True` was removed in PyTorch 0.4; use no_grad()
        # for the whole validation loop, as the other trainers here do.
        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Change the data type.
                inputs = Variable(data_tuple[0].cuda(non_blocking=True))
                heatmap = Variable(data_tuple[1].cuda(non_blocking=True))
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = Variable(data_tuple[2].cuda(non_blocking=True))

                # Forward pass.
                paf_out, heatmap_out = self.pose_net(inputs)

                # Compute the loss of the val batch.
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss = loss_heatmap
                if len(data_tuple) > 3:
                    vecmap = Variable(data_tuple[3].cuda(non_blocking=True))
                    loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                    loss = loss_heatmap + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, self.iters)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Outer loop: train until the configured max iteration count."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
class ConvPoseMachine(object):
    """
    The class for Pose Estimation. Include train, val, val & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and losses.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        # Project helpers built from the shared configer.
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = PoseLossManager(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.data_transformer = DataTransformer(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

        self._init_model()

    def _init_model(self):
        """Build the network, optimizer, data loaders and loss."""
        self.pose_net = self.pose_model_manager.single_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)

        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(self._get_parameters())

        self.train_loader = self.pose_data_loader.get_trainloader()
        self.val_loader = self.pose_data_loader.get_valloader()

        self.mse_loss = self.pose_loss_manager.get_pose_loss('mse_loss')

    def _get_parameters(self):
        """Return the parameters handed to the optimizer."""
        return self.pose_net.parameters()

    def __train(self):
        """
        Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        # data_dict: {'img': tensor, 'kpts': keypoints, ...}
        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['img']
            # input_size is (width, height) taken from the batch tensor.
            input_size = [inputs.size(3), inputs.size(2)]
            heatmap = self.heatmap_generator(data_dict['kpts'], input_size)
            self.data_time.update(time.time() - start_time)

            # Change the data type.
            inputs, heatmap = self.module_utilizer.to_device(inputs, heatmap)
            # self.pose_visualizer.vis_peaks(heatmap[0], inputs[0], name='cpm')

            # Forward pass.
            outputs = self.pose_net(inputs)

            # Compute the loss of the train batch & backward.
            # FIX: the original passed an undefined `maskmap` as a third
            # argument (NameError on the first batch). The val path calls the
            # loss with two arguments, so do the same here.
            # NOTE(review): __val computes the loss on outputs[-1] only while
            # the train path uses `outputs` — presumably the loss accepts a
            # stage list here; confirm against the mse_loss implementation.
            loss = self.mse_loss(outputs, heatmap)

            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
                         'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                             self.configer.get('epoch'), self.configer.get('iters'),
                             self.configer.get('solver', 'display_iter'),
                             self.scheduler.get_lr(), batch_time=self.batch_time,
                             data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """
        Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                input_size = [inputs.size(3), inputs.size(2)]
                heatmap = self.heatmap_generator(data_dict['kpts'], input_size)

                # Change the data type.
                inputs, heatmap = self.module_utilizer.to_device(inputs, heatmap)

                # Forward pass.
                outputs = self.pose_net(inputs)

                # Compute the loss of the val batch (final stage only).
                loss = self.mse_loss(outputs[-1], heatmap)
                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, save_mode='iters')

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Entry point: optionally validate a resumed net, then train to max_epoch."""
        cudnn.benchmark = True
        if self.configer.get('network', 'resume') is not None and self.configer.get('network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get('solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get('solver', 'max_epoch'):
                break
class ConvPoseMachineTest(object):
    """Inference-only runner for the Convolutional Pose Machine detector."""

    def __init__(self, configer):
        self.configer = configer
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None

    def init_model(self):
        """Build the detector, load weights and switch to eval mode."""
        self.pose_net = self.pose_model_manager.pose_detector()
        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Run the full pipeline on one image and write the visualization."""
        image_raw = cv2.imread(image_path)
        heatmap_avg = self.__get_heatmap(image_raw)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_save = self.__draw_key_point(all_peaks, image_raw)
        cv2.imwrite(save_path, image_save)

    def __get_heatmap(self, img_raw):
        """Average the network heatmaps over the configured scale pyramid."""
        multiplier = [scale * self.configer.get('data', 'input_size')[0] / img_raw.shape[1]
                      for scale in self.configer.get('data', 'scale_search')]

        # FIX: np.zeros takes the shape as a single tuple; the original passed
        # three positional args (interpreted as shape, dtype, order) and raised.
        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            img_test_pad, pad = PadImage(self.configer.get('network', 'stride'), 0)(img_test)
            # HWC uint8 -> 1x3xHxW float in roughly [-0.5, 0.5).
            img_test_pad = np.transpose(np.float32(img_test_pad[:, :, :, np.newaxis]),
                                        (3, 2, 0, 1)) / 256 - 0.5
            feed = Variable(torch.from_numpy(img_test_pad)).cuda()

            heatmap = self.pose_net(feed)

            # extract outputs, resize, and remove padding
            # FIX: convert to numpy before the 3-axis transpose; torch's
            # Tensor.transpose only swaps two dims, so the original call raised.
            heatmap = heatmap.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            # FIX: img_test_pad is (1, 3, H, W) after the transpose above, so
            # the padded height/width are shape[2]/shape[3]; the original used
            # shape[0]/shape[1] (= 1 and 3) and cropped the heatmap away.
            heatmap = heatmap[:img_test_pad.shape[2] - pad[2],
                              :img_test_pad.shape[3] - pad[3], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            heatmap_avg = heatmap_avg + heatmap / len(multiplier)

        return heatmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Find local maxima (peaks) per keypoint channel above the threshold."""
        all_peaks = []

        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # A peak is >= each of its 4 neighbours and above the threshold.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            # Append the un-smoothed score at each peak location.
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            all_peaks.append(peaks_with_score)

        return all_peaks

    def __draw_key_point(self, all_peaks, img_raw):
        """Draw a filled circle per detected peak on a copy of the image."""
        img_canvas = img_raw.copy()  # B,G,R order

        for i in range(self.configer.get('data', 'num_keypoints')):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'stick_width'),
                           self.configer.get('details', 'color_list')[i], thickness=-1)

        return img_canvas

    def test(self, test_img=None, test_dir=None):
        """Run inference on a single image or a directory (exactly one of the two)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'), 'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                self.__test_img(image_path, save_path)

    def __create_coco_submission(self, test_dir=None, base_dir=None):
        # Not implemented yet.
        pass

    def create_submission(self, test_dir=None):
        """Dispatch submission creation by dataset (only 'coco' is supported)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if self.configer.get('dataset') == 'coco':
            self.__create_coco_submission(test_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending exactly one subdirectory level."""
        filename_list = list()
        for item in os.listdir(dir_name):
            # FIX: check the joined path; a bare `item` is resolved against the
            # current working directory, so subdirectories were never detected.
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class SingleShotDetectorTest(object):
    """Inference runner for the SSD object detector: decode, NMS, visualize."""

    def __init__(self, configer):
        self.configer = configer
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        # Prior (default) boxes, computed once by calling the layer.
        self.default_boxes = PriorBoxLayer(configer)()
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

    def init_model(self):
        """Build the detector, load weights and switch to eval mode."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Detect objects in one image; write vis/raw images and a JSON file."""
        Log.info('Image Path: {}'.format(image_path))
        ori_img_rgb = ImageHelper.img2np(ImageHelper.pil_open_rgb(image_path))
        ori_img_bgr = ImageHelper.rgb2bgr(ori_img_rgb)
        inputs = ImageHelper.resize(ori_img_rgb,
                                    tuple(self.configer.get('data', 'input_size')), Image.CUBIC)
        inputs = ToTensor()(inputs)
        inputs = Normalize(mean=self.configer.get('trans_params', 'mean'),
                           std=self.configer.get('trans_params', 'std'))(inputs)

        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            bbox, cls = self.det_net(inputs)

        # Drop the batch dim; turn class logits into probabilities.
        bbox = bbox.cpu().data.squeeze(0)
        cls = F.softmax(cls.cpu().squeeze(0), dim=-1).data
        boxes, lbls, scores = self.__decode(bbox, cls)
        json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)

        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(), json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    def __nms(self, bboxes, scores, mode='union'):
        """Non maximum suppression.

        Args:
          bboxes(tensor): bounding boxes, sized [N,4].
          scores(tensor): bbox scores, sized [N,].
          threshold(float): overlap threshold.
          mode(str): 'union' or 'min'.

        Returns:
          keep(tensor): selected indices.

        Ref:
          https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py

        NOTE(review): the `mode` parameter is ignored below — the branch reads
        self.configer.get('nms', 'mode') instead, and only the error message
        uses `mode`. Confirm whether the parameter should be removed or used.
        """
        x1 = bboxes[:, 0]
        y1 = bboxes[:, 1]
        x2 = bboxes[:, 2]
        y2 = bboxes[:, 3]

        areas = (x2 - x1) * (y2 - y1)
        _, order = scores.sort(0, descending=True)

        keep = []
        while order.numel() > 0:
            # Highest-scoring remaining box is always kept.
            i = order[0]
            keep.append(i)

            if order.numel() == 1:
                break

            # Intersection of box i with every remaining box.
            xx1 = x1[order[1:]].clamp(min=x1[i])
            yy1 = y1[order[1:]].clamp(min=y1[i])
            xx2 = x2[order[1:]].clamp(max=x2[i])
            yy2 = y2[order[1:]].clamp(max=y2[i])

            w = (xx2-xx1).clamp(min=0)
            h = (yy2-yy1).clamp(min=0)
            inter = w*h

            if self.configer.get('nms', 'mode') == 'union':
                ovr = inter / (areas[i] + areas[order[1:]] - inter)
            elif self.configer.get('nms', 'mode') == 'min':
                ovr = inter / areas[order[1:]].clamp(max=areas[i])
            else:
                raise TypeError('Unknown nms mode: %s.' % mode)

            # Keep only boxes whose overlap with i is below the threshold.
            ids = (ovr <= self.configer.get('nms', 'overlap_threshold')).nonzero().squeeze()
            if ids.numel() == 0:
                break

            # +1 because `ids` indexes into order[1:].
            order = order[ids + 1]

        return torch.LongTensor(keep)

    def __decode(self, loc, conf):
        """Transform predicted loc/conf back to real bbox locations and class labels.

        Args:
          loc: (tensor) predicted loc, sized [8732, 4].
          conf: (tensor) predicted conf, sized [8732, 21].

        Returns:
          boxes: (tensor) bbox locations, sized [#obj, 4].
          labels: (tensor) class labels, sized [#obj,1].
        """
        # NOTE(review): [0.1, 0.2] look like the standard SSD box variances —
        # confirm they match the training-time target encoding.
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, 2:] * variances[1]) * self.default_boxes[:, 2:]
        cxcy = loc[:, :2] * variances[0] * self.default_boxes[:, 2:] + self.default_boxes[:, :2]
        # Convert (center, size) to (xmin, ymin, xmax, ymax).
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 1)  # [8732,4]

        max_conf, labels = conf.max(1)  # [8732,1]
        # Label 0 is background, so nonzero() selects candidate objects.
        ids = labels.nonzero()
        tmp = ids.cpu().numpy()

        if tmp.__len__() > 0:
            # print('detected %d objs' % tmp.__len__())
            ids = ids.squeeze(1)  # [#boxes,]
            keep = self.__nms(boxes[ids], max_conf[ids])

            pred_bboxes = boxes[ids][keep].cpu().numpy()
            # Clamp to the normalized [0, 1] image range.
            pred_bboxes = np.clip(pred_bboxes, 0, 1)
            pred_labels = labels[ids][keep].cpu().numpy()
            pred_confs = max_conf[ids][keep].cpu().numpy()

            return pred_bboxes, pred_labels, pred_confs
        else:
            Log.info('None object detected!')
            pred_bboxes = list()
            pred_labels = list()
            pred_confs = list()
            return pred_bboxes, pred_labels, pred_confs

    def __get_info_tree(self, box_list, label_list, conf, image_raw):
        """Convert normalized detections to a pixel-space JSON-able dict."""
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        for bbox, label, cf in zip(box_list, label_list, conf):
            # Skip low-confidence detections.
            if cf < self.configer.get('vis', 'conf_threshold'):
                continue

            object_dict = dict()
            xmin = bbox[0] * width
            xmax = bbox[2] * width
            ymin = bbox[1] * height
            ymax = bbox[3] * height
            object_dict['bbox'] = [xmin, ymin, xmax, ymax]
            # Shift by one so background (0) is removed from the label space.
            object_dict['label'] = label - 1
            object_dict['score'] = cf

            object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        """Run detection on configured test_img or test_dir (exactly one set)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det', self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(base_dir, 'json',
                                     '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis',
                                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            if not os.path.exists(os.path.dirname(json_path)):
                os.makedirs(os.path.dirname(json_path))
            if not os.path.exists(os.path.dirname(raw_path)):
                os.makedirs(os.path.dirname(raw_path))
            if not os.path.exists(os.path.dirname(vis_path)):
                os.makedirs(os.path.dirname(vis_path))

            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(base_dir, 'json',
                                         '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis',
                                        '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                if not os.path.exists(os.path.dirname(json_path)):
                    os.makedirs(os.path.dirname(json_path))
                if not os.path.exists(os.path.dirname(raw_path)):
                    os.makedirs(os.path.dirname(raw_path))
                if not os.path.exists(os.path.dirname(vis_path)):
                    os.makedirs(os.path.dirname(vis_path))

                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Decode ground-truth boxes from the val loader and visualize them.

        Stops after 20 samples (exit). Useful for sanity-checking the
        encode/decode round trip against the prior boxes.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.det_data_loader.get_valloader()
        count = 0
        for i, (inputs, bboxes, labels) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                # Undo the input normalization to recover a viewable image.
                ori_img_rgb = DeNormalize(mean=self.configer.get('trans_params', 'mean'),
                                          std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img_rgb = ori_img_rgb.numpy().transpose(1, 2, 0).astype(np.uint8)
                ori_img_bgr = cv2.cvtColor(ori_img_rgb, cv2.COLOR_RGB2BGR)
                # One-hot encode the labels so they look like class scores.
                eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
                labels_target = eye_matrix[labels.view(-1)].view(
                    inputs.size(0), -1, self.configer.get('data', 'num_classes'))
                boxes, lbls, scores = self.__decode(bboxes[j], labels_target[j])
                json_dict = self.__get_info_tree(boxes, lbls, scores, ori_img_rgb)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(), json_dict,
                    conf_threshold=self.configer.get('vis', 'conf_threshold'))
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class OpenPoseTest(object):
    """Inference runner for OpenPose-style multi-person pose estimation.

    Pipeline: multi-scale heatmap/PAF inference -> peak extraction ->
    limb (PAF) scoring -> greedy assembly of peaks into per-person subsets ->
    drawing.

    NOTE(review): uses pre-0.4 PyTorch idioms (Variable(..., volatile=True));
    on modern torch this should be torch.no_grad() — confirm the target
    torch version before running.
    """

    def __init__(self, configer):
        self.configer = configer
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None

    def init_model(self):
        """Build the pose network, load weights and switch to eval mode."""
        self.pose_net = self.pose_model_manager.pose_detector()
        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Run the full multi-person pipeline on one image and save the drawing."""
        image_raw = cv2.imread(image_path)
        paf_avg, heatmap_avg = self.__get_paf_and_heatmap(image_raw)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        special_k, connection_all = self.__extract_paf_info(image_raw, paf_avg, all_peaks)
        subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
        subset, img_canvas = self.__draw_key_point(subset, all_peaks, image_raw)
        img_canvas = self.__link_key_point(img_canvas, candidate, subset)
        cv2.imwrite(save_path, img_canvas)

    def __get_paf_and_heatmap(self, img_raw):
        """Average heatmaps and PAFs over the configured scale pyramid."""
        multiplier = [scale * self.configer.get('data', 'input_size')[0] / img_raw.shape[1]
                      for scale in self.configer.get('data', 'scale_search')]

        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))
        paf_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                            self.configer.get('network', 'paf_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            # Pad so the spatial size is a multiple of the network stride.
            img_test_pad, pad = PadImage(self.configer.get('network', 'stride'), 0)(img_test)
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(mean=[128.0, 128.0, 128.0],
                                     std=[256.0, 256.0, 256.0])(img_test_pad)
            img_test_pad = Variable(img_test_pad.unsqueeze(0).cuda(), volatile=True)

            paf_out, heatmap_out = self.pose_net(img_test_pad)

            # extract outputs, resize, and remove padding
            heatmap = heatmap_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            # Crop the stride padding, then scale back to the raw image size.
            heatmap = heatmap[:img_test_pad.size(2) - pad[2], :img_test_pad.size(3) - pad[3], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

            paf = paf_out.data.squeeze().cpu().numpy().transpose(1, 2, 0)
            paf = cv2.resize(paf, (0, 0),
                             fx=self.configer.get('network', 'stride'),
                             fy=self.configer.get('network', 'stride'),
                             interpolation=cv2.INTER_CUBIC)
            paf = paf[:img_test_pad.size(2) - pad[2], :img_test_pad.size(3) - pad[3], :]
            paf = cv2.resize(paf, (img_raw.shape[1], img_raw.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

            heatmap_avg = heatmap_avg + heatmap / len(multiplier)
            paf_avg = paf_avg + paf / len(multiplier)

        return paf_avg, heatmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Find peaks per keypoint channel; tag each with a global running id."""
        all_peaks = []
        peak_counter = 0

        for part in range(self.configer.get('data', 'num_keypoints')):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # A peak is >= each of its 4 neighbours and above the threshold.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            ids = range(peak_counter, peak_counter + len(peaks))
            # Each peak becomes (x, y, score, global_id).
            peaks_with_score_and_id = [peaks_with_score[i] + (ids[i],) for i in range(len(ids))]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        return all_peaks

    def __extract_paf_info(self, img_raw, paf_avg, all_peaks):
        """Score candidate limbs by integrating the PAF along each peak pair.

        Returns:
          special_k: limb indices with no candidate endpoints at all.
          connection_all: per limb, array rows [idA, idB, score, i, j].
        """
        connection_all = []
        special_k = []
        # Number of sample points along each candidate limb segment.
        mid_num = 10

        for k in range(len(self.configer.get('details', 'limb_seq'))):
            # The two PAF channels (x, y components) for limb k.
            score_mid = paf_avg[:, :, [k*2, k*2+1]]
            # self.pose_visualizer.vis_paf(score_mid, img_raw, name='pa{}'.format(k))
            # Candidate endpoints (limb_seq is 1-based).
            candA = all_peaks[self.configer.get('details', 'limb_seq')[k][0] - 1]
            candB = all_peaks[self.configer.get('details', 'limb_seq')[k][1] - 1]

            nA = len(candA)
            nB = len(candB)

            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):
                        # Unit vector from A to B (epsilon avoids /0).
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) + 1e-9
                        vec = np.divide(vec, norm)

                        # Sample mid_num points along the segment A->B.
                        startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                                       np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        startend = list(startend)

                        vec_x = np.array([score_mid[int(round(startend[I][1])),
                                                    int(round(startend[I][0])), 0]
                                          for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])),
                                                    int(round(startend[I][0])), 1]
                                          for I in range(len(startend))])

                        # Dot product of the PAF with the limb direction.
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts) / len(score_midpts)
                        # Penalize limbs longer than half the image height.
                        score_with_dist_prior += min(0.5 * img_raw.shape[0] / norm - 1, 0)

                        num_positive = len(np.nonzero(
                            score_midpts > self.configer.get('vis', 'limb_threshold'))[0])
                        # At least 80% of samples must exceed the threshold,
                        # and the distance-adjusted score must be positive.
                        criterion1 = num_positive > int(0.8 * len(score_midpts))
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append(
                                [i, j, score_with_dist_prior,
                                 score_with_dist_prior + candA[i][2] + candB[j][2]])

                # Greedy matching by descending limb score, one use per peak.
                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0, 5))
                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if i not in connection[:, 3] and j not in connection[:, 4]:
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB):
                            break

                connection_all.append(connection)
            else:
                special_k.append(k)
                connection_all.append([])

        return special_k, connection_all

    def __get_subsets(self, connection_all, special_k, all_peaks):
        """Merge limb connections into per-person keypoint subsets."""
        # last number in each row is the total parts number of that person
        # the second last number in each row is the score of the overall configuration
        subset = -1 * np.ones((0, 20))
        # Flat array of all peaks: rows are (x, y, score, id).
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in range(len(self.configer.get('details', 'limb_seq'))):
            if k not in special_k:
                partAs = connection_all[k][:, 0]
                partBs = connection_all[k][:, 1]
                indexA, indexB = np.array(self.configer.get('details', 'limb_seq')[k]) - 1

                for i in range(len(connection_all[k])):  # = 1:size(temp,1)
                    found = 0
                    subset_idx = [-1, -1]
                    # Look for existing subsets already containing either endpoint.
                    for j in range(len(subset)):  # 1:size(subset,1):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        # Extend the one matching subset with the B endpoint.
                        j = subset_idx[0]
                        if (subset[j][indexB] != partBs[i]):
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2:  # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else:  # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        return subset, candidate

    def __draw_key_point(self, subset, all_peaks, img_raw):
        """Drop weak/small subsets, then draw every detected peak."""
        # Remove people with < 4 parts or low average per-part score.
        del_ids = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
                del_ids.append(i)

        subset = np.delete(subset, del_ids, axis=0)
        img_canvas = img_raw.copy()  # B,G,R order

        for i in range(self.configer.get('data', 'num_keypoints')):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'circle_radius'),
                           self.configer.get('details', 'color_list')[i], thickness=-1)

        return subset, img_canvas

    def __link_key_point(self, img_canvas, candidate, subset):
        """Draw semi-transparent limb ellipses between linked keypoints."""
        for i in range(self.configer.get('data', 'num_keypoints')-1):
            for n in range(len(subset)):
                index = subset[n][np.array(self.configer.get('details', 'limb_seq')[i]) - 1]
                # Skip limbs with a missing endpoint.
                if -1 in index:
                    continue

                cur_canvas = img_canvas.copy()
                # NOTE(review): Y is read from column 0 and X from column 1,
                # then the ellipse center is (mY, mX) — the reversal appears
                # deliberate given candidate rows are (x, y, ...); verify.
                Y = candidate[index.astype(int), 0]
                X = candidate[index.astype(int), 1]
                mX = np.mean(X)
                mY = np.mean(Y)
                length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
                angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
                polygon = cv2.ellipse2Poly((int(mY), int(mX)),
                                           (int(length / 2), self.configer.get('vis', 'stick_width')),
                                           int(angle), 0, 360, 1)
                cv2.fillConvexPoly(cur_canvas, polygon,
                                   self.configer.get('details', 'color_list')[i])
                # Blend so the limb overlay stays translucent.
                img_canvas = cv2.addWeighted(img_canvas, 0.4, cur_canvas, 0.6, 0)

        return img_canvas

    def test(self):
        """Run pose estimation on configured test_img or test_dir (exactly one set)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'), 'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                self.__test_img(image_path, save_path)

    def __create_coco_submission(self, test_dir=None, base_dir=None):
        """Run the pipeline over COCO test-dev and write a keypoints result file.

        NOTE(review): the inner `for i in range(...)` shadows the outer
        enumerate index `i`; harmless as written (the outer `i` is unused
        afterwards) but fragile — worth renaming.
        NOTE(review): the output file is opened without a `with` block.
        """
        out_file = os.path.join(base_dir, 'person_keypoints_test-dev2017_donny_results.json')
        out_list = list()
        coco = COCO(os.path.join(test_dir, 'image_info_test-dev2017.json'))
        for i, img_id in enumerate(list(coco.imgs.keys())):
            filename = coco.imgs[img_id]['file_name']
            image_raw = cv2.imread(os.path.join(test_dir, 'test2017', filename))
            paf_avg, heatmap_avg = self.__get_paf_and_heatmap(image_raw)
            all_peaks = self.__extract_heatmap_info(heatmap_avg)
            special_k, connection_all = self.__extract_paf_info(image_raw, paf_avg, all_peaks)
            subset, candidate = self.__get_subsets(connection_all, special_k, all_peaks)
            subset, img_canvas = self.__draw_key_point(subset, all_peaks, image_raw)
            for n in range(len(subset)):
                dict_temp = dict()
                dict_temp['image_id'] = img_id
                dict_temp['category_id'] = 1
                dict_temp['score'] = subset[n][-2]
                pose_list = list()
                for i in range(self.configer.get('data', 'num_keypoints')-1):
                    # Map internal joint order back to COCO's order.
                    index = subset[n][self.configer.get('details', 'coco_to_ours')[i]]
                    if index == -1:
                        # Missing joint: x = y = 0 (COCO convention).
                        pose_list.append(0)
                        pose_list.append(0)
                    else:
                        pose_list.append(candidate[index.astype(int)][0])
                        pose_list.append(candidate[index.astype(int)][1])

                    # Visibility flag, always 1 here.
                    pose_list.append(1)

                dict_temp['keypoints'] = pose_list
                out_list.append(dict_temp)

        fw = open(out_file, 'w')
        fw.write(json.dumps(out_list))
        fw.close()

    def create_submission(self):
        """Dispatch submission creation by dataset (only 'coco' is supported)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_dir = self.configer.get('test_dir')
        if self.configer.get('dataset') == 'coco':
            self.__create_coco_submission(test_dir, base_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files in dir_name, descending exactly one subdirectory level.

        NOTE(review): `os.path.isdir(item)` is resolved against the CWD, not
        against dir_name, so subdirectories are effectively never detected —
        should be os.path.isdir(os.path.join(dir_name, item)).
        """
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(item):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class OpenPose(object):
    """
    The class for Pose Estimation. Include train, val, test & predict.
    """
    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and losses.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        # Project helpers built from the shared configer.
        self.vis = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.lr = None
        self.iters = None

    def init_model(self):
        """Build the network, optimizer, data loaders and loss."""
        self.pose_net = self.model_manager.pose_detector()
        self.iters = 0
        self.pose_net, _ = self.module_utilizer.load_net(self.pose_net)

        self.optimizer, self.lr = self.module_utilizer.update_optimizer(self.pose_net, self.iters)

        if self.configer.get('dataset') == 'coco':
            self.train_loader = self.data_loader.get_trainloader(OPCocoLoader)
            self.val_loader = self.data_loader.get_valloader(OPCocoLoader)
        else:
            Log.error('Dataset: {} is not valid!'.format(self.configer.get('dataset')))
            exit(1)

        self.mse_loss = self.loss_manager.get_pose_loss('mse_loss')

    def __train(self):
        """
        Train function of every epoch during train phase.
        """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)

            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                # FIX: exit with a non-zero status on a fatal loader error
                # (the original used exit(0), signalling success).
                exit(1)

            # FIX: `cuda(async=True)` is a SyntaxError on Python 3.7+ (`async`
            # became a reserved word); the modern torch spelling is
            # `non_blocking`. Variable wrappers are no longer needed since
            # torch 0.4, matching the other trainers in this file.
            inputs = data_tuple[0].cuda(non_blocking=True)
            heatmap = data_tuple[1].cuda(non_blocking=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = data_tuple[2].cuda(non_blocking=True)

            # Forward pass.
            paf_out, heatmap_out = self.pose_net(inputs)
            self.vis.vis_paf(paf_out,
                             inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0),
                             name='paf_out')

            # Compute the loss of the train batch & backward.
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
            loss = loss_heatmap
            if len(data_tuple) > 3:
                vecmap = data_tuple[3].cuda(non_blocking=True)
                self.vis.vis_paf(vecmap,
                                 inputs.data.cpu().squeeze().numpy().transpose(1, 2, 0),
                                 name='paf')
                loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                loss += loss_associate

            # FIX: `loss.data[0]` is the 0.3-era scalar accessor; use .item().
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                             self.iters, self.configer.get('solver', 'display_iter'),
                             self.lr, batch_time=self.batch_time,
                             data_time=self.data_time, loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            # Adjust the learning rate after every iteration.
            self.optimizer, self.lr = self.module_utilizer.update_optimizer(
                self.pose_net, self.iters)

    def __val(self):
        """
        Validation function during the train phase.
        """
        self.pose_net.eval()
        start_time = time.time()

        # FIX: torch.no_grad() replaces the removed Variable(volatile=True).
        with torch.no_grad():
            for j, data_tuple in enumerate(self.val_loader):
                # Change the data type.
                inputs = data_tuple[0].cuda(non_blocking=True)
                heatmap = data_tuple[1].cuda(non_blocking=True)
                maskmap = None
                if len(data_tuple) > 2:
                    maskmap = data_tuple[2].cuda(non_blocking=True)

                # Forward pass.
                paf_out, heatmap_out = self.pose_net(inputs)

                # Compute the loss of the val batch.
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss = loss_heatmap
                if len(data_tuple) > 3:
                    vecmap = data_tuple[3].cuda(non_blocking=True)
                    loss_associate = self.mse_loss(paf_out, vecmap, maskmap)
                    loss = loss_heatmap + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, self.iters)

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time, loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Entry point: iterate __train until max_iter is reached."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break
class SingleShotDetector(object):
    """ The class for Single Shot Detector. Include train, val, test & predict. """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss running averages for logging.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        # Project helpers: visualization, loss, model, data, SSD target/prior layers.
        self.det_visualizer = DetVisualizer(configer)
        self.det_loss_manager = DetLossManager(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.ssd_target_generator = SSDTargetGenerator(configer)
        self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)
        self.det_running_score = DetRunningScore(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.data_transformer = DataTransformer(configer)
        self.det_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        self._init_model()

    def _init_model(self):
        """Build the detector net, restore weights, and set up optimizer, scheduler,
        data loaders and the multibox loss."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.det_data_loader.get_trainloader()
        self.val_loader = self.det_data_loader.get_valloader()
        self.det_loss = self.det_loss_manager.get_det_loss('ssd_multibox_loss')

    def _get_parameters(self):
        """Return the parameters handed to the optimizer (all net parameters)."""
        return self.det_net.parameters()

    def warm_lr(self, batch_len):
        """Sets the learning rate
        # Adapted from PyTorch Imagenet example:
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py

        Linearly ramps the LR from 'warm_lr' up to 'base_lr' over the first
        'warm_epoch' epochs (warm_iters = warm_epoch * batch_len iterations).
        """
        warm_iters = self.configer.get('lr', 'warm')['warm_epoch'] * batch_len
        warm_lr = self.configer.get('lr', 'warm')['warm_lr']
        if self.configer.get('iters') < warm_iters:
            # Linear interpolation between warm_lr and base_lr.
            lr_delta = (self.configer.get('lr', 'base_lr') -
                        warm_lr) * self.configer.get('iters') / warm_iters
            lr = warm_lr + lr_delta
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr

    def __train(self):
        """ Train function of every epoch during train phase. """
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))
        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_dict in enumerate(self.train_loader):
            # Warm-up overrides the scheduler LR during the first epochs, if enabled.
            if not self.configer.is_empty(
                    'lr', 'is_warm') and self.configer.get('lr', 'is_warm'):
                self.warm_lr(len(self.train_loader))

            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            # Change the data type.
            inputs = self.module_utilizer.to_device(inputs)
            self.data_time.update(time.time() - start_time)
            # Forward pass.
            feat_list, loc, cls = self.det_net(inputs)
            # Match ground truth to priors; input_size is passed as [W, H].
            bboxes, labels = self.ssd_target_generator(
                feat_list, batch_gt_bboxes, batch_gt_labels,
                [inputs.size(3), inputs.size(2)])
            bboxes, labels = self.module_utilizer.to_device(bboxes, labels)
            # Compute the loss of the train batch & backward.
            loss = self.det_loss(loc, bboxes, cls, labels)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')
            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               (self.configer.get('iters')) % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """ Validation function during the train phase. """
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                batch_gt_bboxes = data_dict['bboxes']
                batch_gt_labels = data_dict['labels']
                inputs = self.module_utilizer.to_device(inputs)
                input_size = [inputs.size(3), inputs.size(2)]
                # Forward pass.
                feat_list, loc, cls = self.det_net(inputs)
                bboxes, labels = self.ssd_target_generator(
                    feat_list, batch_gt_bboxes, batch_gt_labels, input_size)
                bboxes, labels = self.module_utilizer.to_device(bboxes, labels)
                # Compute the loss of the val batch.
                loss = self.det_loss(loc, bboxes, cls, labels)
                self.val_losses.update(loss.item(), inputs.size(0))
                # Decode predictions and feed them to the mAP accumulator.
                batch_detections = SingleShotDetectorTest.decode(
                    loc, cls, self.ssd_priorbox_layer(feat_list, input_size),
                    self.configer, input_size)
                batch_pred_bboxes = self.__get_object_list(batch_detections)
                # batch_pred_bboxes = self._get_gt_object_list(batch_gt_bboxes, batch_gt_labels)
                self.det_running_score.update(batch_pred_bboxes,
                                              batch_gt_bboxes, batch_gt_labels)
                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            self.module_utilizer.save_net(self.det_net, save_mode='iters')
            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Val mAP: {}'.format(self.det_running_score.get_mAP()))
            self.det_running_score.reset()
            self.batch_time.reset()
            self.val_losses.reset()
            self.det_net.train()

    def _get_gt_object_list(self, batch_gt_bboxes, batch_gt_labels):
        """Debug helper: convert ground-truth boxes/labels into the prediction
        list format [xmin, ymin, xmax, ymax, label, score=1.0]."""
        batch_pred_bboxes = list()
        for i in range(len(batch_gt_bboxes)):
            object_list = list()
            if batch_gt_bboxes[i].numel() > 0:
                for j in range(batch_gt_bboxes[i].size(0)):
                    object_list.append([
                        batch_gt_bboxes[i][j][0].item(),
                        batch_gt_bboxes[i][j][1].item(),
                        batch_gt_bboxes[i][j][2].item(),
                        batch_gt_bboxes[i][j][3].item(),
                        batch_gt_labels[i][j].item(), 1.0
                    ])
            batch_pred_bboxes.append(object_list)
        return batch_pred_bboxes

    def __get_object_list(self, batch_detections):
        """Convert decoded detections (x1, y1, x2, y2, conf, cls) into per-image
        lists; class index is shifted by -1 (background offset)."""
        batch_pred_bboxes = list()
        for idx, detections in enumerate(batch_detections):
            object_list = list()
            if detections is not None:
                for x1, y1, x2, y2, conf, cls_pred in detections:
                    xmin = x1.cpu().item()
                    ymin = y1.cpu().item()
                    xmax = x2.cpu().item()
                    ymax = y2.cpu().item()
                    cf = conf.cpu().item()
                    cls_pred = cls_pred.cpu().item() - 1
                    object_list.append([
                        xmin, ymin, xmax, ymax,
                        int(cls_pred),
                        float('%.2f' % cf)
                    ])
            batch_pred_bboxes.append(object_list)
        return batch_pred_bboxes

    def train(self):
        """Epoch loop: optionally validate a resumed model first, then train
        until 'max_epoch' is reached."""
        cudnn.benchmark = True
        if self.configer.get('network', 'resume') is not None and self.configer.get(
                'network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class FCNSegmentorTest(object):
    """Inference-only driver for the FCN segmentor: runs single images or a
    directory through the net and writes color-coded / label-coded outputs.

    Fix: ``__list_dir`` previously tested ``os.path.isdir(item)`` against the
    bare entry name, i.e. relative to the current working directory instead of
    ``dir_name`` — subdirectories were silently treated as plain files whenever
    the CWD differed from ``dir_name``. It now joins with ``dir_name`` first.
    """

    def __init__(self, configer):
        self.configer = configer
        self.seg_vis = SegVisualizer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.seg_net = None

    def init_model(self):
        """Build the segmentation net, restore weights, switch to eval mode."""
        self.seg_net = self.seg_model_manager.seg_net()
        self.seg_net, _ = self.module_utilizer.load_net(self.seg_net)
        self.seg_net.eval()

    def forward(self, image_path):
        """Run one image through the net.

        Returns the 2-D argmax class map (numpy) of the net's output.
        NOTE(review): uses legacy ``Variable(..., volatile=True)`` and raw
        mean/std of 128/256 — kept as-is for compatibility with the legacy
        PyTorch stack this file targets.
        """
        image = Image.open(image_path).convert('RGB')
        image = RandomResize(size=self.configer.get('data', 'input_size'),
                             is_base=False)(image)
        image = ToTensor()(image)
        image = Normalize(mean=[128.0, 128.0, 128.0],
                          std=[256.0, 256.0, 256.0])(image)
        inputs = Variable(image.unsqueeze(0).cuda(), volatile=True)
        results = self.seg_net.forward(inputs)
        return results.data.cpu().numpy().argmax(axis=1)[0].squeeze()

    def __test_img(self, image_path, save_path):
        """Dispatch a single image to the dataset-specific test routine."""
        if self.configer.get('dataset') == 'cityscape':
            self.__test_cityscape_img(image_path, save_path)
        elif self.configer.get('dataset') == 'laneline':
            self.__test_laneline_img(image_path, save_path)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __test_cityscape_img(self, img_path, save_path):
        """Colorize the predicted class map with the Cityscapes palette and save."""
        # Cityscapes train-id color palette (19 classes), RGB tuples.
        color_list = [(128, 64, 128), (244, 35, 232), (70, 70, 70),
                      (102, 102, 156), (190, 153, 153), (153, 153, 153),
                      (250, 170, 30), (220, 220, 0), (107, 142, 35),
                      (152, 251, 152), (70, 130, 180), (220, 20, 60),
                      (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
                      (0, 80, 100), (0, 0, 230), (119, 11, 32)]
        result = self.forward(img_path)
        # Output resolution is the input size divided by the network stride.
        width = self.configer.get(
            'data', 'input_size')[0] // self.configer.get('network', 'stride')
        height = self.configer.get(
            'data', 'input_size')[1] // self.configer.get('network', 'stride')
        color_dst = np.zeros((height, width, 3), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            color_dst[result == i] = color_list[i]

        color_img = np.array(color_dst, dtype=np.uint8)
        color_img = Image.fromarray(color_img, 'RGB')
        color_img.save(save_path)

    def __test_laneline_img(self, img_path, save_path):
        """Placeholder for the laneline dataset — not implemented."""
        pass

    def test(self):
        """Segment either a single 'test_img' or every file under 'test_dir'
        (exactly one of the two must be configured)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg', self.configer.get('dataset'),
                                'test')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            for filename in self.__list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                self.__test_img(image_path, save_path)

    def __create_cityscape_submission(self, test_dir=None, base_dir=None):
        """Write Cityscapes submission files: map predicted train-ids back to
        the official label ids and save as paletted PNGs."""
        # Official Cityscapes label ids corresponding to the 19 train ids.
        label_list = [
            7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31,
            32, 33
        ]
        for filename in self.__list_dir(test_dir):
            image_path = os.path.join(test_dir, filename)
            save_path = os.path.join(base_dir, filename)
            result = self.forward(image_path)
            width = self.configer.get(
                'data', 'input_size')[0] // self.configer.get(
                    'network', 'stride')
            height = self.configer.get(
                'data', 'input_size')[1] // self.configer.get(
                    'network', 'stride')
            # 255 = "ignore" for any pixel whose class is never assigned.
            label_dst = np.ones((height, width), dtype=np.uint8) * 255
            for i in range(self.configer.get('data', 'num_classes')):
                label_dst[result == i] = label_list[i]

            label_img = np.array(label_dst, dtype=np.uint8)
            label_img = Image.fromarray(label_img, 'P')
            label_img.save(save_path)

    def create_submission(self, test_dir=None):
        """Dispatch submission generation per dataset (cityscape only)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg', self.configer.get('dataset'),
                                'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        if self.configer.get('dataset') == 'cityscape':
            self.__create_cityscape_submission(test_dir, base_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)

    def __list_dir(self, dir_name):
        """List files directly under ``dir_name``; entries of first-level
        subdirectories are returned as 'subdir/filename'.

        BUGFIX: the directory test must be performed on the joined path —
        ``os.path.isdir(item)`` resolved against the CWD, not ``dir_name``.
        """
        filename_list = list()
        for item in os.listdir(dir_name):
            if os.path.isdir(os.path.join(dir_name, item)):
                for filename in os.listdir(os.path.join(dir_name, item)):
                    filename_list.append('{}/{}'.format(item, filename))
            else:
                filename_list.append(item)

        return filename_list
class ConvPoseMachineTest(object):
    """Inference driver for the Convolutional Pose Machine: multi-scale heatmap
    prediction, local-maximum peak extraction, and keypoint visualization."""

    def __init__(self, configer):
        self.configer = configer
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        # CUDA when any GPU is configured, otherwise CPU.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

    def init_model(self):
        """Build the pose net, restore weights, switch to eval mode."""
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Predict keypoints for one image and save the annotated BGR image."""
        image_raw = ImageHelper.cv2_open_bgr(image_path)
        inputs = ImageHelper.bgr2rgb(image_raw)
        heatmap_avg = self.__get_heatmap(inputs)
        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_save = self.__draw_key_point(all_peaks, image_raw)
        cv2.imwrite(save_path, image_save)

    def __get_heatmap(self, img_raw):
        """Average the net's heatmaps over the configured search scales.

        Each scale is normalized so input_size[0] maps to the raw image width.
        Returns an (H, W, heatmap_out) array at the raw image resolution.
        """
        multiplier = [
            scale * self.configer.get('data', 'input_size')[0] /
            img_raw.shape[1]
            for scale in self.configer.get('data', 'scale_search')
        ]

        heatmap_avg = np.zeros((img_raw.shape[0], img_raw.shape[1],
                                self.configer.get('network', 'heatmap_out')))

        for i, scale in enumerate(multiplier):
            img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale,
                                  interpolation=cv2.INTER_CUBIC)
            # Pad so both sides are divisible by the network stride;
            # pad = per-side padding amounts (see PadImage).
            img_test_pad, pad = PadImage(self.configer.get(
                'network', 'stride'))(img_test)
            img_test_pad = ToTensor()(img_test_pad)
            img_test_pad = Normalize(
                mean=self.configer.get('trans_params', 'mean'),
                std=self.configer.get('trans_params', 'std'))(img_test_pad)
            with torch.no_grad():
                img_test_pad = img_test_pad.unsqueeze(0).to(self.device)
                heatmap_out_list = self.pose_net(img_test_pad)

            # Only the last stage's heatmap is used.
            heatmap_out = heatmap_out_list[-1]

            # extract outputs, resize, and remove padding
            heatmap = heatmap_out.data.squeeze().cpu().numpy().transpose(
                1, 2, 0)
            heatmap = cv2.resize(heatmap, (0, 0),
                                 fx=self.configer.get('network', 'stride'),
                                 fy=self.configer.get('network', 'stride'),
                                 interpolation=cv2.INTER_CUBIC)
            # Crop away the padded border (pad[2]=right, pad[3]=bottom —
            # presumably; matches usage in the paf variant above).
            heatmap = heatmap[:img_test_pad.size(2) -
                              pad[3], :img_test_pad.size(3) - pad[2], :]
            heatmap = cv2.resize(heatmap, (img_raw.shape[1], img_raw.shape[0]),
                                 interpolation=cv2.INTER_CUBIC)
            heatmap_avg = heatmap_avg + heatmap / len(multiplier)

        return heatmap_avg

    def __extract_heatmap_info(self, heatmap_avg):
        """Find local maxima per keypoint channel (last channel = background,
        skipped). Returns a list of per-part lists of (x, y, score) tuples."""
        all_peaks = []

        for part in range(self.configer.get('network', 'heatmap_out') - 1):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # Compare each pixel against its 4 neighbors via shifted copies.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(
                np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            # Score each peak with the *unsmoothed* heatmap value.
            peaks_with_score = [x + (map_ori[x[1], x[0]], ) for x in peaks]
            all_peaks.append(peaks_with_score)

        return all_peaks

    def __draw_key_point(self, all_peaks, img_raw):
        """Draw one filled circle per detected peak on a copy of the raw image."""
        img_canvas = img_raw.copy()  # B,G,R order

        for i in range(self.configer.get('network', 'heatmap_out') - 1):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'stick_width'),
                           self.configer.get('details', 'color_list')[i],
                           thickness=-1)

        return img_canvas

    def debug(self):
        """Visualize ground-truth heatmaps from the val loader as keypoint
        overlays, for sanity-checking data/labels."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        val_data_loader = self.pose_data_loader.get_valloader()

        for i, (inputs, heatmap) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                # Undo normalization to recover a displayable image.
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                image_raw = ori_img.numpy().transpose(1, 2, 0)
                image_raw = cv2.cvtColor(image_raw, cv2.COLOR_RGB2BGR)
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                # Upsample label heatmap back to image resolution.
                heatmap_avg = cv2.resize(
                    heatmap_avg, (0, 0),
                    fx=self.configer.get('network', 'stride'),
                    fy=self.configer.get('network', 'stride'),
                    interpolation=cv2.INTER_CUBIC)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                image_save = self.__draw_key_point(all_peaks, image_raw)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_result.jpg'.format(i, j)),
                    image_save)

    def test(self):
        """Run inference on a single 'test_img' or every file in 'test_dir'
        (exactly one of the two must be configured)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                if not os.path.exists(os.path.dirname(save_path)):
                    os.makedirs(os.path.dirname(save_path))

                self.__test_img(image_path, save_path)

    def __create_coco_submission(self, test_dir=None, base_dir=None):
        """Placeholder for COCO submission generation — not implemented."""
        pass

    def create_submission(self):
        """Dispatch submission generation per dataset (coco only)."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose',
                                self.configer.get('dataset'), 'submission')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        test_dir = self.configer.get('test_dir')
        if self.configer.get('dataset') == 'coco':
            self.__create_coco_submission(test_dir)
        else:
            Log.error('Dataset: {} is not valid.'.format(
                self.configer.get('dataset')))
            exit(1)
class ConvPoseMachine(object):
    """ The class for Pose Estimation. Include train, val, test & predict.

    NOTE(review): this class targets legacy PyTorch (<0.4): it uses
    ``Variable``, ``volatile=True``, ``loss.data[0]`` and
    ``.cuda(async=True)``. ``async`` became a reserved keyword in
    Python 3.7, so this code only parses on Python <= 3.6 — left unchanged
    to match the rest of the legacy stack; migrate to ``non_blocking=True``
    / ``torch.no_grad()`` / ``loss.item()`` when the stack is upgraded.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss running averages for logging.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.loss_manager = PoseLossManager(configer)
        self.model_manager = PoseModelManager(configer)
        self.train_utilizer = ModuleUtilizer(configer)
        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.best_model_loss = None
        self.is_best = None
        self.lr = None
        self.iters = None

    def init_model(self, train_loader=None, val_loader=None):
        """Build the pose net, restore weights/iteration counter, set up the
        optimizer, loaders and the heatmap loss."""
        self.pose_net = self.model_manager.pose_detector()
        self.pose_net, self.iters = self.train_utilizer.load_net(self.pose_net)
        self.optimizer = self.train_utilizer.update_optimizer(self.pose_net,
                                                              self.iters)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.heatmap_loss = self.loss_manager.get_pose_loss('heatmap_loss')

    def __train(self):
        """ Train function of every epoch during train phase. """
        self.pose_net.train()
        start_time = time.time()

        # data_tuple: (inputs, heatmap, maskmap, tagmap, num_objects)
        for i, data_tuple in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            if len(data_tuple) < 2:
                Log.error('Train Loader Error!')
                exit(0)

            inputs = Variable(data_tuple[0].cuda(async=True))
            heatmap = Variable(data_tuple[1].cuda(async=True))
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(async=True))

            self.pose_visualizer.vis_tensor(heatmap, name='heatmap')
            # De-normalize for display (presumably mean=128, std=256 inputs
            # mapped to [0, 1] — confirm against the transform config).
            self.pose_visualizer.vis_tensor((inputs*256+128)/255, name='image')
            # Forward pass.
            outputs = self.pose_net(inputs)
            self.pose_visualizer.vis_tensor(outputs, name='output')
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak')
            # Compute the loss of the train batch & backward.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap
            self.train_losses.update(loss.data[0], inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.iters += 1

            # Print the log info & reset the states.
            if self.iters % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Iteration: {0}\t'
                         'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {2}\n'
                         'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                             self.iters,
                             self.configer.get('solver', 'display_iter'),
                             self.lr, batch_time=self.batch_time,
                             data_time=self.data_time,
                             loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.iters % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

            # Adjust the optimizer/LR after every iteration.
            self.optimizer = self.train_utilizer.update_optimizer(self.pose_net,
                                                                  self.iters)

    def __val(self):
        """ Validation function during the train phase. """
        self.pose_net.eval()
        start_time = time.time()

        for j, data_tuple in enumerate(self.val_loader):
            # Change the data type.
            inputs = Variable(data_tuple[0].cuda(async=True), volatile=True)
            heatmap = Variable(data_tuple[1].cuda(async=True), volatile=True)
            maskmap = None
            if len(data_tuple) > 2:
                maskmap = Variable(data_tuple[2].cuda(async=True),
                                   volatile=True)

            # Forward pass.
            outputs = self.pose_net(inputs)
            self.pose_visualizer.vis_peaks(inputs, outputs, name='peak_val')
            # Compute the loss of the val batch.
            loss_heatmap = self.heatmap_loss(outputs, heatmap, maskmap)
            loss = loss_heatmap
            self.val_losses.update(loss.data[0], inputs.size(0))

            # Update the vars of the val phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Iteration loop: train until 'max_iter' is reached."""
        cudnn.benchmark = True
        while self.iters < self.configer.get('solver', 'max_iter'):
            self.__train()
            if self.iters == self.configer.get('solver', 'max_iter'):
                break

    def test(self, img_path=None, img_dir=None):
        # NOTE(review): this method appears unfinished — it loads the image
        # but never runs inference on it; ``img_dir`` is unused.
        if img_path is not None and os.path.exists(img_path):
            image = Image.open(img_path).convert('RGB')
class FCNSegmentor(object):
    """ The class for Semantic Segmentation. Include train, val & predict.

    Fix: the mean-IoU was stored under the misspelled configer key
    ``'performace'`` while ``save_net(..., metric='performance')`` was called
    with the correct spelling — the best-by-metric checkpointing could never
    read back the value just written. The key is now ``'performance'``.
    """

    def __init__(self, configer):
        self.configer = configer
        # Timing / loss running averages for logging.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.seg_running_score = SegRunningScore(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_loss_manager = SegLossManager(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.seg_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

    def init_model(self):
        """Build the segmentation net, restore weights, and set up optimizer,
        scheduler, data loaders and the pixel-wise cross-entropy loss."""
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = self.module_utilizer.load_net(self.seg_net)
        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())
        self.train_loader = self.seg_data_loader.get_trainloader()
        self.val_loader = self.seg_data_loader.get_valloader()
        self.pixel_loss = self.seg_loss_manager.get_seg_loss(
            'cross_entropy_loss')

    def _get_parameters(self):
        """Return the parameters handed to the optimizer (all net parameters)."""
        return self.seg_net.parameters()

    def __train(self):
        """ Train function of every epoch during train phase. """
        # Validate first when resuming from a checkpoint at iteration 0.
        if self.configer.get(
                'network',
                'resume') is not None and self.configer.get('iters') == 0:
            self.__val()

        self.seg_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        for i, (inputs, targets) in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, targets = self.module_utilizer.to_device(inputs, targets)
            # Forward pass.
            outputs = self.seg_net(inputs)
            # Compute the loss of the train batch & backward.
            loss = self.pixel_loss(outputs, targets)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """ Validation function during the train phase. """
        self.seg_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, (inputs, targets) in enumerate(self.val_loader):
                # Change the data type.
                inputs, targets = self.module_utilizer.to_device(
                    inputs, targets)
                # Forward pass.
                outputs = self.seg_net(inputs)
                # Compute the loss of the val batch.
                loss = self.pixel_loss(outputs, targets)
                self.val_losses.update(loss.item(), inputs.size(0))
                # Accumulate confusion for mean IoU.
                self.seg_running_score.update(
                    outputs.max(1)[1].unsqueeze(1).data, targets.data)

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

            # BUGFIX: key was misspelled 'performace'; save_net below reads
            # the metric named 'performance'.
            self.configer.update_value(['performance'],
                                       self.seg_running_score.get_mean_iou())
            self.configer.update_value(['val_loss'], self.val_losses.avg)
            self.module_utilizer.save_net(self.seg_net, metric='performance')
            self.module_utilizer.save_net(self.seg_net, metric='val_loss')

            # Print the log info & reset the states.
            Log.info(
                'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                               loss=self.val_losses))
            Log.info('Mean IOU: {}'.format(
                self.seg_running_score.get_mean_iou()))
            self.batch_time.reset()
            self.val_losses.reset()
            self.seg_running_score.reset()
            self.seg_net.train()

    def train(self):
        """Epoch loop: train until 'max_epoch' is reached."""
        cudnn.benchmark = True
        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class FastRCNNTest(object):
    """Inference driver for the Faster R-CNN detector: runs single images,
    decodes RoI regression output into boxes with per-class NMS, and provides
    a label-driven debug pipeline."""

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.roi_sampler = FRRoiSampleLayer(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.rpn_target_generator = RPNTargetGenerator(configer)
        self.fr_priorbox_layer = FRPriorBoxLayer(configer)
        self.fr_roi_generator = FRRoiGenerator(configer)
        self.data_transformer = DataTransformer(configer)
        # CUDA when any GPU is configured, otherwise CPU.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        """Build the detector net, restore weights, switch to eval mode."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Detect objects in one image; write raw image, visualization and a
        JSON result file. Returns the JSON dict."""
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img,
                                              mode=self.configer.get(
                                                  'data', 'input_mode'))
        # Resize within bounds; boxes are mapped back by 1/scale later.
        img, scale = BoundResize()(img)
        inputs = self.blob_helper.make_input(img, scale=1.0)
        with torch.no_grad():
            # Forward pass.
            test_group = self.det_net(inputs, scale)
            test_indices_and_rois, test_roi_locs, test_roi_scores, test_rois_num = test_group

        batch_detections = self.decode(test_roi_locs, test_roi_scores,
                                       test_indices_and_rois, test_rois_num,
                                       self.configer,
                                       ImageHelper.get_size(img))
        json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr,
                                         scale=scale)
        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(), json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(roi_locs, roi_scores, indices_and_rois, test_rois_num, configer,
               input_size):
        """Turn per-RoI regression deltas + class scores into final boxes.

        Un-normalizes the deltas, applies them to the RoIs, clips to the image,
        thresholds by confidence (excluding class 0 = background), and runs
        per-class NMS. Returns a per-image list; None where nothing survives.
        """
        roi_locs = roi_locs.cpu()
        roi_scores = roi_scores.cpu()
        indices_and_rois = indices_and_rois.cpu()

        num_classes = configer.get('data', 'num_classes')
        # Per-class loc normalization, broadcast over all RoIs.
        mean = torch.Tensor(configer.get(
            'roi', 'loc_normalize_mean')).repeat(num_classes)[None]
        std = torch.Tensor(configer.get(
            'roi', 'loc_normalize_std')).repeat(num_classes)[None]
        mean = mean.to(roi_locs.device)
        std = std.to(roi_locs.device)

        roi_locs = (roi_locs * std + mean)
        roi_locs = roi_locs.contiguous().view(-1, num_classes, 4)
        # roi_locs = roi_locs[:,:, [1, 0, 3, 2]]

        rois = indices_and_rois[:, 1:]
        rois = rois.contiguous().view(-1, 1, 4).expand_as(roi_locs)
        # Standard box decoding: (dx, dy) shift the RoI center, (dw, dh)
        # exponentially scale its width/height.
        wh = torch.exp(roi_locs[:, :, 2:]) * (rois[:, :, 2:] - rois[:, :, :2])
        cxcy = roi_locs[:, :, :2] * (rois[:, :, 2:] - rois[:, :, :2]) + (
            rois[:, :, :2] + rois[:, :, 2:]) / 2
        dst_bbox = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732,4]

        # clip bounding box
        dst_bbox[:, :, 0::2] = (dst_bbox[:, :, 0::2]).clamp(
            min=0, max=input_size[0] - 1)
        dst_bbox[:, :, 1::2] = (dst_bbox[:, :, 1::2]).clamp(
            min=0, max=input_size[1] - 1)

        # In debug mode the scores are already probabilities (one-hot labels).
        if configer.get('phase') != 'debug':
            cls_prob = F.softmax(roi_scores, dim=1)
        else:
            cls_prob = roi_scores

        cls_label = torch.LongTensor([i for i in range(num_classes)])\
            .contiguous().view(1, num_classes).repeat(indices_and_rois.size(0), 1)

        output = [None for _ in range(test_rois_num.size(0))]
        start_index = 0
        for i in range(test_rois_num.size(0)):
            # batch_index = (indices_and_rois[:, 0] == i).nonzero().contiguous().view(-1,)
            # tmp_dst_bbox = dst_bbox[batch_index]
            # tmp_cls_prob = cls_prob[batch_index]
            # tmp_cls_label = cls_label[batch_index]
            # RoIs are stored contiguously per image; slice by count instead.
            tmp_dst_bbox = dst_bbox[start_index:start_index + test_rois_num[i]]
            tmp_cls_prob = cls_prob[start_index:start_index + test_rois_num[i]]
            tmp_cls_label = cls_label[start_index:start_index +
                                      test_rois_num[i]]
            start_index += test_rois_num[i]

            # Keep confident, non-background (label > 0) detections only.
            mask = (tmp_cls_prob > configer.get(
                'vis', 'conf_threshold')) & (tmp_cls_label > 0)

            tmp_dst_bbox = tmp_dst_bbox[mask].contiguous().view(-1, 4)
            if tmp_dst_bbox.numel() == 0:
                continue

            tmp_cls_prob = tmp_cls_prob[mask].contiguous().view(
                -1, ).unsqueeze(1)
            tmp_cls_label = tmp_cls_label[mask].contiguous().view(
                -1, ).unsqueeze(1)

            valid_preds = torch.cat(
                (tmp_dst_bbox, tmp_cls_prob.float(), tmp_cls_label.float()), 1)

            keep = DetHelper.cls_nms(valid_preds[:, :4],
                                     scores=valid_preds[:, 4],
                                     labels=valid_preds[:, 5],
                                     nms_threshold=configer.get(
                                         'nms', 'overlap_threshold'),
                                     iou_mode=configer.get('nms', 'mode'))

            output[i] = valid_preds[keep]

        return output

    def __make_tensor(self, gt_bboxes, gt_labels):
        """Pad variable-length per-image GT lists into dense batch tensors.

        Returns (bboxes [B, maxlen, 4], counts [B], labels [B, maxlen]);
        maxlen is at least 1 so empty images still produce valid tensors.
        """
        len_arr = [gt_labels[i].numel() for i in range(len(gt_bboxes))]
        batch_maxlen = max(max(len_arr), 1)
        target_bboxes = torch.zeros((len(gt_bboxes), batch_maxlen, 4)).float()
        target_labels = torch.zeros((len(gt_bboxes), batch_maxlen)).long()
        for i in range(len(gt_bboxes)):
            target_bboxes[i, :len_arr[i], :] = gt_bboxes[i]
            target_labels[i, :len_arr[i]] = gt_labels[i]

        target_bboxes_num = torch.Tensor(len_arr).long()
        return target_bboxes, target_bboxes_num, target_labels

    def __get_info_tree(self, detections, image_raw, scale=1.0):
        """Convert one image's detections into the JSON result structure;
        boxes are rescaled by 1/scale and clamped to the raw image bounds."""
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = min(x1.cpu().item() / scale, width - 1)
                ymin = min(y1.cpu().item() / scale, height - 1)
                xmax = min(x2.cpu().item() / scale, width - 1)
                ymax = min(y2.cpu().item() / scale, height - 1)
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                # Class index shifted by -1 (background offset).
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        """Detect in a single 'test_img' or every file under 'test_dir'
        (exactly one of the two must be configured); writes json/raw/vis."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det',
                                self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Sanity-check the label pipeline: build RPN targets from ground
        truth, push them through RoI generation/sampling and decode, then
        visualize. Stops after 20 images."""
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            img_scale = data_dict['imgscale']
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            # batch_gt_bboxes = ResizeBoxes()(inputs, data_dict['bboxes'])
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            # Dummy feature maps at each RPN stride: only their spatial
            # shapes are consumed by the target/prior layers.
            feat_list = list()
            for stride in self.configer.get('rpn', 'stride_list'):
                feat_list.append(
                    torch.zeros((inputs.size(0), 1, input_size[1] // stride,
                                 input_size[0] // stride)))

            gt_rpn_locs, gt_rpn_labels = self.rpn_target_generator(
                feat_list, batch_gt_bboxes, input_size)
            # One-hot RPN scores from GT labels ("ignore" -1 mapped to 0).
            eye_matrix = torch.eye(2)
            gt_rpn_labels[gt_rpn_labels == -1] = 0
            gt_rpn_scores = eye_matrix[gt_rpn_labels.view(-1)].view(
                inputs.size(0), -1, 2)
            test_indices_and_rois, _ = self.fr_roi_generator(
                feat_list, gt_rpn_locs, gt_rpn_scores,
                self.configer.get('rpn', 'n_test_pre_nms'),
                self.configer.get('rpn', 'n_test_post_nms'), input_size,
                img_scale)
            gt_bboxes, gt_nums, gt_labels = self.__make_tensor(
                batch_gt_bboxes, batch_gt_labels)
            sample_rois, gt_roi_locs, gt_roi_labels = self.roi_sampler(
                test_indices_and_rois, gt_bboxes, gt_nums, gt_labels,
                input_size)
            self.det_visualizer.vis_rois(inputs,
                                         sample_rois[gt_roi_labels > 0])
            # Scatter each RoI's GT loc into its GT class slot, then flatten
            # to the [N, 4*num_classes] layout decode() expects.
            gt_cls_roi_locs = torch.zeros(
                (gt_roi_locs.size(0),
                 self.configer.get('data', 'num_classes'), 4))
            gt_cls_roi_locs[torch.arange(0, sample_rois.size(0)).long(),
                            gt_roi_labels.long()] = gt_roi_locs
            gt_cls_roi_locs = gt_cls_roi_locs.contiguous().view(
                -1, 4 * self.configer.get('data', 'num_classes'))
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            gt_roi_scores = eye_matrix[gt_roi_labels.view(-1)].view(
                gt_roi_labels.size(0),
                self.configer.get('data', 'num_classes'))
            # Count sampled RoIs per image (column 0 is the batch index).
            test_rois_num = torch.zeros((len(gt_bboxes), )).long()
            for batch_id in range(len(gt_bboxes)):
                batch_index = (
                    sample_rois[:, 0] == batch_id).nonzero().contiguous().view(
                        -1, )
                test_rois_num[batch_id] = batch_index.numel()

            batch_detections = FastRCNNTest.decode(gt_cls_roi_locs,
                                                   gt_roi_scores, sample_rois,
                                                   test_rois_num,
                                                   self.configer, input_size)

            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(
                    ori_img_bgr,
                    self.fr_priorbox_layer(feat_list, input_size),
                    gt_rpn_labels[j])
                json_dict = self.__get_info_tree(batch_detections[j],
                                                 ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(), json_dict,
                    conf_threshold=self.configer.get('vis', 'conf_threshold'))

                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class FCNSegmentorTest(object):
    """Inference driver for an FCN-style semantic segmentor.

    Supports multi-scale testing ('test', 'scale_search'), optional horizontal
    mirroring ('test', 'mirror') and sliding-window crop testing
    ('test', 'crop_test'); per-pass logits are summed before the argmax.
    """

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_parser = SegParser(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.seg_net = None

        self._init_model()

    def _init_model(self):
        # Build the segmentor, restore weights, and switch to eval mode.
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = self.module_utilizer.load_net(self.seg_net)
        self.seg_net.eval()

    def __test_img(self, image_path, label_path, vis_path, raw_path):
        """Segment one image and write label map, colorized vis and raw copies."""
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        # Accumulator for logits over all scales (and the mirrored pass).
        total_logits = np.zeros((ori_height, ori_width,
                                 self.configer.get('data', 'num_classes')), np.float32)
        for scale in self.configer.get('test', 'scale_search'):
            image = self.blob_helper.make_input(image=ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                # Only crop when the blob is strictly larger than the crop window.
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            results = cv2.resize(results, (ori_width, ori_height),
                                 interpolation=cv2.INTER_LINEAR)
            total_logits += results

        if self.configer.get('test', 'mirror'):
            # Horizontally flip, predict, then flip the logits back before summing.
            if self.configer.get('data', 'image_tool') == 'cv2':
                image = cv2.flip(ori_image, 1)
            else:
                image = ori_image.transpose(Image.FLIP_LEFT_RIGHT)

            image = self.blob_helper.make_input(image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=1.0)
            if self.configer.get('test', 'crop_test'):
                crop_size = self.configer.get('test', 'crop_size')
                if image.size()[3] > crop_size[0] and image.size()[2] > crop_size[1]:
                    results = self._crop_predict(image, crop_size)
                else:
                    results = self._predict(image)
            else:
                results = self._predict(image)

            # [:, ::-1] un-mirrors the width axis of the (H, W, C) logits.
            results = cv2.resize(results[:, ::-1], (ori_width, ori_height),
                                 interpolation=cv2.INTER_LINEAR)
            total_logits += results

        label_map = np.argmax(total_logits, axis=-1)
        label_img = np.array(label_map, dtype=np.uint8)
        # NOTE(review): this conversion assumes ori_image is RGB; confirm against
        # the configured 'input_mode'.
        image_bgr = cv2.cvtColor(np.array(ori_image), cv2.COLOR_RGB2BGR)
        image_canvas = self.seg_parser.colorize(label_img, image_canvas=image_bgr)
        ImageHelper.save(image_canvas, save_path=vis_path)
        ImageHelper.save(ori_image, save_path=raw_path)

        if not self.configer.is_empty('data', 'label_list'):
            # Map internal class indices back to dataset label ids.
            label_img = self.__relabel(label_img)

        label_img = Image.fromarray(label_img, 'P')
        Log.info('Label Path: {}'.format(label_path))
        ImageHelper.save(label_img, label_path)

    def _crop_predict(self, image, crop_size):
        """Sliding-window prediction: split into overlapping crops, run the net
        on all crops as one batch, and sum the per-crop logits back in place.
        """
        height, width = image.size()[2:]
        np_image = image.squeeze(0).permute(1, 2, 0).cpu().numpy()
        height_starts = self._decide_intersection(height, crop_size[1])
        width_starts = self._decide_intersection(width, crop_size[0])
        split_crops = []
        for height in height_starts:
            for width in width_starts:
                image_crop = np_image[height:height + crop_size[1],
                                      width:width + crop_size[0]]
                split_crops.append(image_crop[np.newaxis, :])

        split_crops = np.concatenate(split_crops, axis=0)  # (n, crop_image_size, crop_image_size, 3)
        inputs = torch.from_numpy(split_crops).permute(0, 3, 1, 2).to(self.device)
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[0].permute(0, 2, 3, 1).cpu().numpy()

        # Overlapping regions accumulate logits from every covering crop.
        reassemble = np.zeros((np_image.shape[0], np_image.shape[1], results.shape[-1]),
                              np.float32)
        index = 0
        for height in height_starts:
            for width in width_starts:
                reassemble[height:height + crop_size[1],
                           width:width + crop_size[0]] += results[index]
                index += 1

        return reassemble

    def _decide_intersection(self, total_length, crop_length):
        """Return crop start offsets covering total_length with the configured
        stride ratio; a final crop is appended so the end is always covered.
        """
        stride = int(crop_length * self.configer.get('test', 'crop_stride_ratio'))  # set the stride as the paper do
        times = (total_length - crop_length) // stride + 1
        cropped_starting = []
        for i in range(times):
            cropped_starting.append(stride * i)

        if total_length - cropped_starting[-1] > crop_length:
            cropped_starting.append(total_length - crop_length)  # must cover the total image

        return cropped_starting

    def _predict(self, inputs):
        """Single forward pass; returns (H, W, C) logits as a numpy array."""
        with torch.no_grad():
            results = self.seg_net.forward(inputs)
            results = results[0].squeeze().permute(1, 2, 0).cpu().numpy()

        return results

    def __relabel(self, label_map):
        # Translate class index i -> configured dataset label value.
        height, width = label_map.shape
        label_dst = np.zeros((height, width), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            label_dst[label_map == i] = self.configer.get('data', 'label_list')[i]

        label_dst = np.array(label_dst, dtype=np.uint8)
        return label_dst

    def test(self):
        """Segment a single image ('test_img') or every file in 'test_dir'.

        Writes label/raw/vis outputs under val/results/seg/<dataset>/.
        Exactly one of test_img / test_dir must be configured.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/seg', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            label_path = os.path.join(base_dir, 'label',
                                      '{}.png'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(base_dir, 'vis',
                                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(label_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, label_path, vis_path, raw_path)

        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                label_path = os.path.join(base_dir, 'label',
                                          '{}.png'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(base_dir, 'vis',
                                        '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(label_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, label_path, vis_path, raw_path)

    def debug(self):
        """Visualize ground-truth label maps for the first 20 training samples
        (then exits); writes PNGs to vis/results/seg/<dataset>/debug.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/seg', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.seg_data_loader.get_trainloader()):
            inputs = data_dict['img']
            targets = data_dict['labelmap']
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(label_map, image_canvas=image_bgr)
                cv2.imwrite(os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                            image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class FCClassifier(object):
    """The class for the training phase of Image classification.

    Drives epoch-based training with periodic validation; epoch/iteration
    counters live in the shared configer ('epoch', 'iters').
    """

    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and losses.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.cls_loss_manager = ClsLossManager(configer)
        self.cls_model_manager = ClsModelManager(configer)
        self.cls_data_loader = ClsDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.cls_running_score = ClsRunningScore(configer)
        self.cls_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

        self._init_model()

    def _init_model(self):
        # Build network, restore weights, and wire up optimizer, loaders and loss.
        self.cls_net = self.cls_model_manager.image_classifier()
        self.cls_net = self.module_utilizer.load_net(self.cls_net)

        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())

        self.train_loader = self.cls_data_loader.get_trainloader()
        self.val_loader = self.cls_data_loader.get_valloader()

        self.ce_loss = self.cls_loss_manager.get_cls_loss('cross_entropy_loss')

    def _get_parameters(self):
        # Single parameter group; subclasses/variants may override for per-layer LRs.
        return self.cls_net.parameters()

    def __train(self):
        """Train function of every epoch during train phase."""
        self.cls_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))
        for i, data_dict in enumerate(self.train_loader):
            inputs = data_dict['img']
            labels = data_dict['label']
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, labels = self.module_utilizer.to_device(inputs, labels)
            # Forward pass.
            outputs = self.cls_net(inputs)
            outputs = self.module_utilizer.gather(outputs)
            # Compute the loss of the train batch & backward.
            loss = self.ce_loss(outputs, labels)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
                         'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                         .format(self.configer.get('epoch'),
                                 self.configer.get('iters'),
                                 self.configer.get('solver', 'display_iter'),
                                 self.scheduler.get_lr(),
                                 batch_time=self.batch_time,
                                 data_time=self.data_time,
                                 loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """Validation function during the train phase."""
        self.cls_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for j, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                labels = data_dict['label']
                # Change the data type.
                inputs, labels = self.module_utilizer.to_device(inputs, labels)
                # Forward pass.
                outputs = self.cls_net(inputs)
                outputs = self.module_utilizer.gather(outputs)
                # Compute the loss of the val batch.
                loss = self.ce_loss(outputs, labels)
                self.cls_running_score.update(outputs, labels)
                self.val_losses.update(loss.item(), inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        # Snapshot the current weights after every validation pass.
        self.module_utilizer.save_net(self.cls_net, save_mode='iters')

        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s'.format(batch_time=self.batch_time))
        Log.info('TestLoss = {loss.avg:.8f}'.format(loss=self.val_losses))
        Log.info('Top1 ACC = {}'.format(self.cls_running_score.get_top1_acc()))
        Log.info('Top5 ACC = {}'.format(self.cls_running_score.get_top5_acc()))
        self.batch_time.reset()
        self.val_losses.reset()
        self.cls_running_score.reset()
        # Back to train mode for the caller.
        self.cls_net.train()

    def train(self):
        """Main entry: optionally validate a resumed model, then train until
        'solver.max_epoch' is reached.
        """
        cudnn.benchmark = True
        if self.configer.get('network', 'resume') is not None and \
           self.configer.get('network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get('solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get('solver', 'max_epoch'):
                break
class ConvPoseMachineTest(object):
    """Inference driver for a Convolutional-Pose-Machine-style keypoint detector.

    Averages heatmaps over the configured scale search, extracts local-maximum
    peaks per part, and draws them onto the image.
    """

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.pose_vis = PoseVisualizer(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.heatmap_generator = HeatmapGenerator(configer)
        self.device = torch.device('cpu' if self.configer.get('gpu') is None else 'cuda')
        self.pose_net = None

        self._init_model()

    def _init_model(self):
        # Build the pose network, restore weights, and switch to eval mode.
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)
        self.pose_net.eval()

    def __test_img(self, image_path, save_path):
        """Run multi-scale pose inference on one image and save the visualization."""
        Log.info('Image Path: {}'.format(image_path))
        ori_image = ImageHelper.read_image(image_path,
                                           tool=self.configer.get('data', 'image_tool'),
                                           mode=self.configer.get('data', 'input_mode'))
        ori_width, ori_height = ImageHelper.get_size(ori_image)
        ori_img_bgr = ImageHelper.get_cv2_bgr(ori_image,
                                              mode=self.configer.get('data', 'input_mode'))
        heatmap_avg = np.zeros((ori_height, ori_width,
                                self.configer.get('network', 'heatmap_out')))
        for i, scale in enumerate(self.configer.get('test', 'scale_search')):
            image = self.blob_helper.make_input(ori_image,
                                                input_size=self.configer.get('test', 'input_size'),
                                                scale=scale)
            with torch.no_grad():
                # Last stage's output is the refined heatmap.
                heatmap_out_list = self.pose_net(image)
                heatmap_out = heatmap_out_list[-1]

                # extract outputs, resize, and remove padding
                heatmap = heatmap_out.squeeze(0).cpu().numpy().transpose(1, 2, 0)
                heatmap = cv2.resize(heatmap, (ori_width, ori_height),
                                     interpolation=cv2.INTER_CUBIC)
                # Uniform average across all searched scales.
                heatmap_avg = heatmap_avg + heatmap / len(self.configer.get('test', 'scale_search'))

        all_peaks = self.__extract_heatmap_info(heatmap_avg)
        image_canvas = self.__draw_key_point(all_peaks, ori_img_bgr)
        ImageHelper.save(image_canvas, save_path)

    def __extract_heatmap_info(self, heatmap_avg):
        """Find per-part peaks: local maxima (4-neighborhood, after Gaussian
        smoothing) above 'vis.part_threshold'.

        Returns a list (one entry per part, background channel excluded) of
        (x, y, score) tuples.
        """
        all_peaks = []

        # Last heatmap channel is skipped (background channel by construction here).
        for part in range(self.configer.get('network', 'heatmap_out') - 1):
            map_ori = heatmap_avg[:, :, part]
            map_gau = gaussian_filter(map_ori, sigma=3)

            # Shifted copies used for the 4-neighborhood local-maximum test.
            map_left = np.zeros(map_gau.shape)
            map_left[1:, :] = map_gau[:-1, :]
            map_right = np.zeros(map_gau.shape)
            map_right[:-1, :] = map_gau[1:, :]
            map_up = np.zeros(map_gau.shape)
            map_up[:, 1:] = map_gau[:, :-1]
            map_down = np.zeros(map_gau.shape)
            map_down[:, :-1] = map_gau[:, 1:]

            peaks_binary = np.logical_and.reduce(
                (map_gau >= map_left, map_gau >= map_right,
                 map_gau >= map_up, map_gau >= map_down,
                 map_gau > self.configer.get('vis', 'part_threshold')))

            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])  # note reverse
            peaks = list(peaks)
            # Score each peak with the *unsmoothed* heatmap value at (y, x).
            peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
            all_peaks.append(peaks_with_score)

        return all_peaks

    def __draw_key_point(self, all_peaks, img_raw):
        """Draw each detected part peak as a filled colored circle; returns a copy."""
        img_canvas = img_raw.copy()  # B,G,R order

        for i in range(self.configer.get('network', 'heatmap_out') - 1):
            for j in range(len(all_peaks[i])):
                cv2.circle(img_canvas, all_peaks[i][j][0:2],
                           self.configer.get('vis', 'stick_width'),
                           self.configer.get('details', 'color_list')[i],
                           thickness=-1)

        return img_canvas

    def debug(self):
        """Round-trip check: regenerate heatmaps from GT keypoints, run peak
        extraction/drawing, and dump result images to vis/results/pose/.../debug.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/pose', self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        for i, data_dict in enumerate(self.pose_data_loader.get_trainloader()):
            inputs = data_dict['img']
            input_size = [inputs.size(3), inputs.size(2)]
            heatmap = self.heatmap_generator(data_dict['kpts'], input_size)
            for j in range(inputs.size(0)):
                image_bgr = self.blob_helper.tensor2bgr(inputs[j])
                heatmap_avg = heatmap[j].numpy().transpose(1, 2, 0)
                # Upsample from network-output resolution back to input resolution.
                heatmap_avg = cv2.resize(heatmap_avg, (0, 0),
                                         fx=self.configer.get('network', 'stride'),
                                         fy=self.configer.get('network', 'stride'),
                                         interpolation=cv2.INTER_CUBIC)
                all_peaks = self.__extract_heatmap_info(heatmap_avg)
                image_save = self.__draw_key_point(all_peaks, image_bgr)
                cv2.imwrite(os.path.join(base_dir, '{}_{}_result.jpg'.format(i, j)),
                            image_save)

    def test(self):
        """Run pose inference on 'test_img' or every file in 'test_dir'.

        Exactly one of the two must be configured; results go under
        val/results/pose/<dataset>/.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/pose', self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)

        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                if not os.path.exists(os.path.dirname(save_path)):
                    os.makedirs(os.path.dirname(save_path))

                self.__test_img(image_path, save_path)
class YOLOv3(object):
    """The class for YOLO v3. Include train, val, test & predict.

    Epoch-based training with optional LR warm-up and periodic validation
    (loss + mAP); epoch/iteration counters live in the shared configer.
    """

    def __init__(self, configer):
        self.configer = configer
        # Running meters for timing and losses.
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.det_visualizer = DetVisualizer(configer)
        self.det_loss_manager = DetLossManager(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.yolo_detection_layer = YOLODetectionLayer(configer)
        self.yolo_target_generator = YOLOTargetGenerator(configer)
        self.det_running_score = DetRunningScore(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)
        self.det_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None

        self._init_model()

    def _init_model(self):
        # Build network, restore weights, and wire up optimizer, loaders and loss.
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)

        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())

        self.train_loader = self.det_data_loader.get_trainloader()
        self.val_loader = self.det_data_loader.get_valloader()

        self.det_loss = self.det_loss_manager.get_det_loss('yolov3_loss')

    def _get_parameters(self):
        """Split parameters into two LR groups: backbone at base_lr, the rest
        (detection head) at 10x base_lr.
        """
        lr_1 = []
        lr_10 = []
        params_dict = dict(self.det_net.named_parameters())
        for key, value in params_dict.items():
            if 'backbone.' not in key:
                lr_10.append(value)
            else:
                lr_1.append(value)

        params = [{'params': lr_1, 'lr': self.configer.get('lr', 'base_lr')},
                  {'params': lr_10, 'lr': self.configer.get('lr', 'base_lr') * 10.}]
        return params

    def warm_lr(self, batch_len):
        """Sets the learning rate
        # Adapted from PyTorch Imagenet example:
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py
        """
        # Linearly ramp each group's LR from ~0 to its scheduled value over
        # the first 'warm_epoch' epochs worth of iterations.
        warm_iters = self.configer.get('lr', 'warm')['warm_epoch'] * batch_len
        if self.configer.get('iters') < warm_iters:
            lr_ratio = (self.configer.get('iters') + 1) / warm_iters
            base_lr_list = self.scheduler.get_lr()
            for param_group, base_lr in zip(self.optimizer.param_groups, base_lr_list):
                param_group['lr'] = base_lr * lr_ratio

            if self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0:
                Log.info('LR: {}'.format(
                    [param_group['lr'] for param_group in self.optimizer.param_groups]))

    def __train(self):
        """Train function of every epoch during train phase."""
        self.det_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, data_dict in enumerate(self.train_loader):
            if not self.configer.is_empty('lr', 'is_warm') and self.configer.get('lr', 'is_warm'):
                self.warm_lr(len(self.train_loader))

            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs = self.module_utilizer.to_device(inputs)

            # Forward pass.
            feat_list, predictions, _ = self.det_net(inputs)

            targets, objmask, noobjmask = self.yolo_target_generator(
                feat_list, batch_gt_bboxes, batch_gt_labels, input_size)
            targets, objmask, noobjmask = self.module_utilizer.to_device(
                targets, objmask, noobjmask)
            # Compute the loss of the train batch & backward.
            loss = self.det_loss(predictions, targets, objmask, noobjmask)
            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get('solver', 'display_iter') == 0:
                Log.info('Train Epoch: {0}\tTrain Iteration: {1}\t'
                         'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                         'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:3f})\n'
                         'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                         .format(self.configer.get('epoch'),
                                 self.configer.get('iters'),
                                 self.configer.get('solver', 'display_iter'),
                                 self.scheduler.get_lr(),
                                 batch_time=self.batch_time,
                                 data_time=self.data_time,
                                 loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
               (self.configer.get('iters')) % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """Validation function during the train phase."""
        self.det_net.eval()
        start_time = time.time()
        with torch.no_grad():
            for i, data_dict in enumerate(self.val_loader):
                inputs = data_dict['img']
                batch_gt_bboxes = data_dict['bboxes']
                batch_gt_labels = data_dict['labels']
                input_size = [inputs.size(3), inputs.size(2)]
                # Forward pass.
                inputs = self.module_utilizer.to_device(inputs)
                feat_list, predictions, detections = self.det_net(inputs)

                targets, objmask, noobjmask = self.yolo_target_generator(
                    feat_list, batch_gt_bboxes, batch_gt_labels, input_size)
                targets, objmask, noobjmask = self.module_utilizer.to_device(
                    targets, objmask, noobjmask)

                # Compute the loss of the val batch.
                loss = self.det_loss(predictions, targets, objmask, noobjmask)
                self.val_losses.update(loss.item(), inputs.size(0))

                # Decode raw detections and feed them to the mAP accumulator.
                batch_detections = YOLOv3Test.decode(detections, self.configer)
                batch_pred_bboxes = self.__get_object_list(batch_detections, input_size)
                self.det_running_score.update(batch_pred_bboxes,
                                              batch_gt_bboxes, batch_gt_labels)

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        # Snapshot the current weights after every validation pass.
        self.module_utilizer.save_net(self.det_net, save_mode='iters')
        # Print the log info & reset the states.
        Log.info('Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                 'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                                loss=self.val_losses))
        Log.info('Val mAP: {}'.format(self.det_running_score.get_mAP()))
        self.det_running_score.reset()
        self.batch_time.reset()
        self.val_losses.reset()
        # Back to train mode for the caller.
        self.det_net.train()

    def __get_object_list(self, batch_detections, input_size):
        """Convert decoded detections (normalized coords) into per-image lists of
        [xmin, ymin, xmax, ymax, label, score] in input-pixel coordinates.
        """
        batch_pred_bboxes = list()
        for idx, detections in enumerate(batch_detections):
            object_list = list()
            if detections is not None:
                for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                    # Scale normalized box coords back to the input resolution.
                    xmin = x1.cpu().item() * input_size[0]
                    ymin = y1.cpu().item() * input_size[1]
                    xmax = x2.cpu().item() * input_size[0]
                    ymax = y2.cpu().item() * input_size[1]
                    cf = conf.cpu().item()
                    cls_pred = cls_pred.cpu().item()
                    object_list.append([xmin, ymin, xmax, ymax,
                                        int(cls_pred), float('%.2f' % cf)])

            batch_pred_bboxes.append(object_list)

        return batch_pred_bboxes

    def train(self):
        """Main entry: optionally validate a resumed model, then train until
        'solver.max_epoch' is reached.
        """
        cudnn.benchmark = True
        if self.configer.get('network', 'resume') is not None and \
           self.configer.get('network', 'resume_val'):
            self.__val()

        while self.configer.get('epoch') < self.configer.get('solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get('solver', 'max_epoch'):
                break
class FCClassifierTest(object): def __init__(self, configer): self.configer = configer self.blob_helper = BlobHelper(configer) self.cls_model_manager = ClsModelManager(configer) self.cls_data_loader = ClsDataLoader(configer) self.module_utilizer = ModuleUtilizer(configer) self.cls_parser = ClsParser(configer) self.device = torch.device( 'cpu' if self.configer.get('gpu') is None else 'cuda') self.cls_net = None if self.configer.get('dataset') == 'imagenet': with open( os.path.join( self.configer.get('project_dir'), 'datasets/cls/imagenet/imagenet_class_index.json') ) as json_stream: name_dict = json.load(json_stream) name_seq = [ name_dict[str(i)][1] for i in range(self.configer.get('data', 'num_classes')) ] self.configer.add_key_value(['details', 'name_seq'], name_seq) self._init_model() def _init_model(self): self.cls_net = self.cls_model_manager.image_classifier() self.cls_net = self.module_utilizer.load_net(self.cls_net) self.cls_net.eval() def __test_img(self, image_path, json_path, raw_path, vis_path): Log.info('Image Path: {}'.format(image_path)) img = ImageHelper.read_image( image_path, tool=self.configer.get('data', 'image_tool'), mode=self.configer.get('data', 'input_mode')) trans = None if self.configer.get('dataset') == 'imagenet': if self.configer.get('data', 'image_tool') == 'cv2': img = Image.fromarray(img) trans = transforms.Compose([ transforms.Scale(256), transforms.CenterCrop(224), ]) assert trans is not None img = trans(img) ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get( 'data', 'input_mode')) inputs = self.blob_helper.make_input(img, input_size=self.configer.get( 'test', 'input_size'), scale=1.0) with torch.no_grad(): outputs = self.cls_net(inputs) json_dict = self.__get_info_tree(outputs, image_path) image_canvas = self.cls_parser.draw_label(ori_img_bgr.copy(), json_dict['label']) cv2.imwrite(vis_path, image_canvas) cv2.imwrite(raw_path, ori_img_bgr) Log.info('Json Path: {}'.format(json_path)) JsonHelper.save_file(json_dict, 
json_path) return json_dict def __get_info_tree(self, outputs, image_path=None): json_dict = dict() if image_path is not None: json_dict['image_path'] = image_path topk = (1, 3, 5) maxk = max(topk) _, pred = outputs.topk(maxk, 1, True, True) pred = pred.t() for k in topk: if k == 1: json_dict['label'] = pred[0][0] else: json_dict['label_top{}'.format(k)] = pred[0][:k] return json_dict def test(self): base_dir = os.path.join(self.configer.get('project_dir'), 'val/results/cls', self.configer.get('dataset')) test_img = self.configer.get('test_img') test_dir = self.configer.get('test_dir') if test_img is None and test_dir is None: Log.error('test_img & test_dir not exists.') exit(1) if test_img is not None and test_dir is not None: Log.error('Either test_img or test_dir.') exit(1) if test_img is not None: base_dir = os.path.join(base_dir, 'test_img') filename = test_img.rstrip().split('/')[-1] json_path = os.path.join( base_dir, 'json', '{}.json'.format('.'.join(filename.split('.')[:-1]))) raw_path = os.path.join(base_dir, 'raw', filename) vis_path = os.path.join( base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1]))) FileHelper.make_dirs(json_path, is_file=True) FileHelper.make_dirs(raw_path, is_file=True) FileHelper.make_dirs(vis_path, is_file=True) self.__test_img(test_img, json_path, raw_path, vis_path) else: base_dir = os.path.join(base_dir, 'test_dir', test_dir.rstrip('/').split('/')[-1]) FileHelper.make_dirs(base_dir) for filename in FileHelper.list_dir(test_dir): image_path = os.path.join(test_dir, filename) json_path = os.path.join( base_dir, 'json', '{}.json'.format('.'.join(filename.split('.')[:-1]))) raw_path = os.path.join(base_dir, 'raw', filename) vis_path = os.path.join( base_dir, 'vis', '{}_vis.png'.format('.'.join(filename.split('.')[:-1]))) FileHelper.make_dirs(json_path, is_file=True) FileHelper.make_dirs(raw_path, is_file=True) FileHelper.make_dirs(vis_path, is_file=True) self.__test_img(image_path, json_path, raw_path, vis_path) 
def debug(self): base_dir = os.path.join(self.configer.get('project_dir'), 'vis/results/cls', self.configer.get('dataset'), 'debug') if not os.path.exists(base_dir): os.makedirs(base_dir) count = 0 for i, data_dict in enumerate(self.cls_data_loader.get_trainloader()): inputs = data_dict['img'] labels = data_dict['label'] eye_matrix = torch.eye(self.configer.get('data', 'num_classes')) labels_target = eye_matrix[labels.view(-1)].view( inputs.size(0), self.configer.get('data', 'num_classes')) for j in range(inputs.size(0)): count = count + 1 if count > 20: exit(1) ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j]) json_dict = self.__get_info_tree(labels_target) image_canvas = self.cls_parser.draw_label( ori_img_bgr.copy(), json_dict['label']) cv2.imwrite( os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)), image_canvas) cv2.imshow('main', image_canvas) cv2.waitKey()
class FCNSegmentorTest(object):
    """Inference harness for a fully-convolutional semantic segmentor.

    Loads a trained network and runs single-image or directory-level
    testing, saving predicted label maps as palette ('P' mode) PNGs, plus
    a debug mode that colorizes ground-truth maps from the val loader.
    """

    def __init__(self, configer):
        self.configer = configer
        self.seg_visualizer = SegVisualizer(configer)
        self.seg_parser = SegParser(configer)
        self.seg_model_manager = SegModelManager(configer)
        self.seg_data_loader = SegDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        # Fall back to CPU when no GPU id is configured.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.seg_net = None

    def init_model(self):
        """Build the segmentor, load pretrained weights, switch to eval."""
        self.seg_net = self.seg_model_manager.semantic_segmentor()
        self.seg_net = self.module_utilizer.load_net(self.seg_net)
        self.seg_net.eval()

    def __test_img(self, image_path, save_path):
        """Segment one image and save the predicted label map.

        The image is rescaled to the configured input size for the forward
        pass; the predicted map is resized back to the original resolution
        with nearest-neighbour interpolation so labels stay discrete.

        :param image_path: path of the input RGB image.
        :param save_path: destination path for the palette-mode label PNG.
        """
        image = ImageHelper.pil_open_rgb(image_path)
        ori_width, ori_height = image.size
        image = Scale(size=self.configer.get('data', 'input_size'))(image)
        image = ToTensor()(image)
        image = Normalize(mean=self.configer.get('trans_params', 'mean'),
                          std=self.configer.get('trans_params', 'std'))(image)
        with torch.no_grad():
            inputs = image.unsqueeze(0).to(self.device)
            results = self.seg_net.forward(inputs)

        # argmax over the class channel -> (H, W) integer label map.
        label_map = results.data.cpu().numpy().argmax(axis=1)[0].squeeze()
        label_img = np.array(label_map, dtype=np.uint8)
        if not self.configer.is_empty('details', 'label_list'):
            # Remap contiguous class indices to the dataset's label ids.
            label_img = self.__relabel(label_img)

        label_img = Image.fromarray(label_img, 'P')
        label_img = label_img.resize((ori_width, ori_height), Image.NEAREST)
        label_img.save(save_path)

    def __relabel(self, label_map):
        """Map contiguous class indices [0, num_classes) to dataset labels.

        :param label_map: (H, W) uint8 array of class indices.
        :return: (H, W) uint8 array with 'details.label_list' values.
        """
        height, width = label_map.shape
        label_dst = np.zeros((height, width), dtype=np.uint8)
        for i in range(self.configer.get('data', 'num_classes')):
            label_dst[label_map == i] = self.configer.get(
                'details', 'label_list')[i]

        label_dst = np.array(label_dst, dtype=np.uint8)
        return label_dst

    def test(self):
        """Run single-image or directory-level segmentation testing.

        Exactly one of the configured 'test_img' / 'test_dir' must be set;
        outputs go under <output_dir>/val/results/seg/<dataset>.
        """
        base_dir = os.path.join(self.configer.get('output_dir'),
                                'val/results/seg',
                                self.configer.get('dataset'))
        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        # Exactly one of the two inputs must be provided.
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            # exist_ok replaces the race-prone exists()+makedirs() pattern.
            os.makedirs(base_dir, exist_ok=True)

            filename = test_img.rstrip().split('/')[-1]
            save_path = os.path.join(base_dir, filename)
            self.__test_img(test_img, save_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            os.makedirs(base_dir, exist_ok=True)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                save_path = os.path.join(base_dir, filename)
                # list_dir may yield nested relative paths; ensure parents.
                os.makedirs(os.path.dirname(save_path), exist_ok=True)
                self.__test_img(image_path, save_path)

    def debug(self):
        """Colorize up to 20 ground-truth label maps from the val loader.

        Writes '{i}_{j}_vis.png' under
        <project_dir>/vis/results/seg/<dataset>/debug and shows each image
        (blocks on a key press). Exits the process after 20 samples.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/seg',
                                self.configer.get('dataset'), 'debug')
        os.makedirs(base_dir, exist_ok=True)

        val_data_loader = self.seg_data_loader.get_valloader()
        count = 0
        for i, (inputs, targets) in enumerate(val_data_loader):
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                # Undo the train-time normalization to recover a viewable image.
                ori_img = DeNormalize(
                    mean=self.configer.get('trans_params', 'mean'),
                    std=self.configer.get('trans_params', 'std'))(inputs[j])
                ori_img = ori_img.numpy().transpose(1, 2, 0).astype(np.uint8)
                image_bgr = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
                label_map = targets[j].numpy()
                image_canvas = self.seg_parser.colorize(
                    label_map, image_canvas=image_bgr)
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class SingleShotDetectorTest(object):
    """Inference / debug harness for an SSD-style object detector.

    Loads a trained detector and runs single-image or directory-level
    testing, writing raw images, box visualizations and JSON detections.
    """

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.ssd_priorbox_layer = SSDPriorBoxLayer(configer)
        self.ssd_target_generator = SSDTargetGenerator(configer)
        # Fall back to CPU when no GPU id is configured.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        """Build the detector, load pretrained weights, switch to eval."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Detect objects in one image; write raw/vis images and JSON.

        :return: the JSON-serialisable detection dict.
        """
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get(
            'data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get(
                                                 'test', 'input_size'),
                                             scale=1.0)

        with torch.no_grad():
            feat_list, bbox, cls = self.det_net(inputs)
            # Decode regression offsets against the SSD prior boxes.
            batch_detections = self.decode(
                bbox, cls,
                self.ssd_priorbox_layer(feat_list,
                                        self.configer.get(
                                            'test', 'input_size')),
                self.configer, [inputs.size(3), inputs.size(2)])
            json_dict = self.__get_info_tree(
                batch_detections[0], ori_img_bgr,
                [inputs.size(3), inputs.size(2)])

        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(),
            json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        cv2.imwrite(vis_path, image_canvas)
        cv2.imwrite(raw_path, ori_img_bgr)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(bbox, conf, default_boxes, configer, input_size):
        """Decode SSD regression output into thresholded, NMS-ed detections.

        :param bbox: predicted offsets, (batch, num_priors, 4).
        :param conf: per-class scores; softmaxed here except in debug phase
            (debug passes already one-hot targets through this path).
        :param default_boxes: prior boxes in (cx, cy, w, h) form.
        :param input_size: (width, height) used for clipping.
        :return: list (len = batch) of (N, 6) tensors
            [x1, y1, x2, y2, score, label] or None per image.
        """
        loc = bbox.cpu()
        if configer.get('phase') != 'debug':
            conf = F.softmax(conf.cpu(), dim=-1)

        default_boxes = default_boxes.unsqueeze(0).repeat(loc.size(0), 1, 1)

        # Standard SSD variance-scaled decoding: center offsets are linear,
        # width/height offsets are log-space.
        variances = [0.1, 0.2]
        wh = torch.exp(loc[:, :, 2:] * variances[1]) * default_boxes[:, :, 2:]
        cxcy = loc[:, :, :2] * variances[
            0] * default_boxes[:, :, 2:] + default_boxes[:, :, :2]
        boxes = torch.cat([cxcy - wh / 2, cxcy + wh / 2], 2)  # [b, 8732,4]

        batch_size, num_priors, _ = boxes.size()
        # Replicate each box once per class so box/score/label rows align
        # after flattening to (batch, num_priors * num_classes, ...).
        boxes = boxes.unsqueeze(2).repeat(1, 1,
                                          configer.get('data', 'num_classes'),
                                          1)
        boxes = boxes.contiguous().view(boxes.size(0), -1, 4)

        # clip bounding box
        boxes[:, :, 0::2] = boxes[:, :, 0::2].clamp(min=0,
                                                    max=input_size[0] - 1)
        boxes[:, :, 1::2] = boxes[:, :, 1::2].clamp(min=0,
                                                    max=input_size[1] - 1)

        labels = torch.Tensor([
            i for i in range((configer.get('data', 'num_classes')))
        ]).to(boxes.device)
        labels = labels.view(1, 1, -1, 1).repeat(batch_size, num_priors, 1,
                                                 1).contiguous().view(
                                                     batch_size, -1, 1)
        max_conf = conf.contiguous().view(batch_size, -1, 1)

        # max_conf, labels = conf.max(2, keepdim=True)  # [b, 8732,1]
        predictions = torch.cat((boxes, max_conf.float(), labels.float()), 2)
        output = [None for _ in range(len(predictions))]
        for image_i, image_pred in enumerate(predictions):
            # nonzero() on the label column drops class 0, i.e. the rows
            # belonging to the background class.
            ids = labels[image_i].squeeze(1).nonzero().contiguous().view(-1, )
            if ids.numel() == 0:
                continue

            valid_preds = image_pred[ids]
            valid_preds = valid_preds[
                valid_preds[:, 4] > configer.get('vis', 'conf_threshold')]
            if valid_preds.numel() == 0:
                continue

            # Class-aware NMS, then keep the top max_per_image by score.
            keep = DetHelper.cls_nms(
                valid_preds[:, :4],
                scores=valid_preds[:, 4],
                labels=valid_preds[:, 5],
                nms_threshold=configer.get('nms', 'max_threshold'),
                iou_mode=configer.get('nms', 'mode'),
                cls_keep_num=configer.get('vis', 'cls_keep_num'))

            valid_preds = valid_preds[keep]
            _, order = valid_preds[:, 4].sort(0, descending=True)
            order = order[:configer.get('vis', 'max_per_image')]
            output[image_i] = valid_preds[order]

        return output

    def __get_info_tree(self, detections, image_raw, input_size):
        """Convert decoded detections to a JSON dict scaled to image size.

        Box coordinates are rescaled from network-input space to the raw
        image resolution.
        """
        height, width, _ = image_raw.shape
        in_width, in_height = input_size
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() / in_width * width
                ymin = y1.cpu().item() / in_height * height
                xmax = x2.cpu().item() / in_width * width
                ymax = y2.cpu().item() / in_height * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                # NOTE(review): label shifted by -1, presumably to drop the
                # background class — confirm against the dataset label map.
                object_dict['label'] = int(cls_pred.cpu().item()) - 1
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        """Run single-image or directory-level detection testing.

        Exactly one of the configured 'test_img' / 'test_dir' must be set;
        results go under <project_dir>/val/results/det/<dataset>.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det',
                                self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        # Exactly one of the two inputs must be provided.
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            FileHelper.make_dirs(base_dir)
            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Round-trip ground truth through the SSD target/decode pipeline.

        Encodes ground-truth boxes into SSD targets, decodes them back,
        and visualizes up to 20 samples (blocks on a key press per image).
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            # Dummy single-channel feature maps at each configured stride;
            # only their spatial shapes are consumed downstream.
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(
                    torch.zeros((inputs.size(0), 1, input_size[1] // stride,
                                 input_size[0] // stride)))

            bboxes, labels = self.ssd_target_generator(feat_list,
                                                       batch_gt_bboxes,
                                                       batch_gt_labels,
                                                       input_size)
            # One-hot labels stand in for class scores in debug decoding.
            eye_matrix = torch.eye(self.configer.get('data', 'num_classes'))
            labels_target = eye_matrix[labels.view(-1)].view(
                inputs.size(0), -1, self.configer.get('data', 'num_classes'))
            batch_detections = self.decode(
                bboxes, labels_target,
                self.ssd_priorbox_layer(feat_list, input_size), self.configer,
                input_size)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                self.det_visualizer.vis_default_bboxes(
                    ori_img_bgr,
                    self.ssd_priorbox_layer(feat_list, input_size), labels[j])
                json_dict = self.__get_info_tree(batch_detections[j],
                                                 ori_img_bgr, input_size)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('vis', 'conf_threshold'))
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()
class RPNPose(object):
    """The class for Pose Estimation. Include train, val, test & predict."""

    def __init__(self, configer):
        self.configer = configer
        self.batch_time = AverageMeter()
        self.data_time = AverageMeter()
        self.train_losses = AverageMeter()
        self.val_losses = AverageMeter()
        self.val_loss_heatmap = AverageMeter()
        self.val_loss_associate = AverageMeter()
        self.pose_visualizer = PoseVisualizer(configer)
        self.pose_loss_manager = PoseLossManager(configer)
        self.pose_model_manager = PoseModelManager(configer)
        self.pose_data_loader = PoseDataLoader(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.optim_scheduler = OptimScheduler(configer)

        self.pose_net = None
        self.train_loader = None
        self.val_loader = None
        self.optimizer = None
        self.scheduler = None
        # Loss modules are created in init_model(); declared here so the
        # instance attribute set is complete after construction.
        self.mse_loss = None
        self.embeding_loss = None

    def init_model(self):
        """Build the network, optimizer/scheduler, loaders and losses."""
        self.pose_net = self.pose_model_manager.multi_pose_detector()
        self.pose_net = self.module_utilizer.load_net(self.pose_net)

        self.optimizer, self.scheduler = self.optim_scheduler.init_optimizer(
            self._get_parameters())

        self.train_loader = self.pose_data_loader.get_trainloader()
        self.val_loader = self.pose_data_loader.get_valloader()

        self.mse_loss = self.pose_loss_manager.get_pose_loss('mse_loss')
        self.embeding_loss = self.pose_loss_manager.get_pose_loss(
            'embedding_loss')

    def _get_parameters(self):
        """Return the parameters handed to the optimizer."""
        return self.pose_net.parameters()

    def __train(self):
        """Train function of every epoch during train phase."""
        self.pose_net.train()
        start_time = time.time()
        # Adjust the learning rate after every epoch.
        self.configer.plus_one('epoch')
        self.scheduler.step(self.configer.get('epoch'))

        # data_tuple: (inputs, heatmap, maskmap, vecmap)
        for i, (inputs, label, heatmap, maskmap, vecmap, tagmap,
                num_objects) in enumerate(self.train_loader):
            self.data_time.update(time.time() - start_time)
            # Change the data type.
            inputs, label, heatmap, maskmap, vecmap, tagmap = self.module_utilizer.to_device(
                inputs, label, heatmap, maskmap, vecmap, tagmap)

            # Forward pass.
            paf_out, heatmap_out, embed_out = self.pose_net(inputs)

            # Compute the loss of the train batch & backward.
            loss_label = self.mse_loss(embed_out.sum(1).squeeze(), label)
            loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
            loss_paf = self.mse_loss(paf_out, vecmap, maskmap)
            loss_associate = self.embeding_loss(embed_out, tagmap, num_objects)
            loss = loss_label + loss_heatmap + loss_paf + loss_associate

            self.train_losses.update(loss.item(), inputs.size(0))
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            # Update the vars of the train phase.
            self.batch_time.update(time.time() - start_time)
            start_time = time.time()
            self.configer.plus_one('iters')

            # Print the log info & reset the states.
            if self.configer.get('iters') % self.configer.get(
                    'solver', 'display_iter') == 0:
                # BUGFIX: data_time.avg used the typo'd spec ':3f' (field
                # width 3, default precision); ':.3f' matches all siblings.
                Log.info(
                    'Train Epoch: {0}\tTrain Iteration: {1}\t'
                    'Time {batch_time.sum:.3f}s / {2}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {2}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {3}\tLoss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'
                    .format(self.configer.get('epoch'),
                            self.configer.get('iters'),
                            self.configer.get('solver', 'display_iter'),
                            self.scheduler.get_lr(),
                            batch_time=self.batch_time,
                            data_time=self.data_time,
                            loss=self.train_losses))
                self.batch_time.reset()
                self.data_time.reset()
                self.train_losses.reset()

            # Check to val the current model.
            if self.val_loader is not None and \
                    self.configer.get('iters') % self.configer.get('solver', 'test_interval') == 0:
                self.__val()

    def __val(self):
        """Validation function during the train phase."""
        self.pose_net.eval()
        start_time = time.time()

        with torch.no_grad():
            for j, (inputs, label, heatmap, maskmap, vecmap, tagmap,
                    num_objects) in enumerate(self.val_loader):
                # Change the data type.
                inputs, label, heatmap, maskmap, vecmap, tagmap = self.module_utilizer.to_device(
                    inputs, label, heatmap, maskmap, vecmap, tagmap)

                # Forward pass.
                paf_out, heatmap_out, embed_out = self.pose_net(inputs)

                # Compute the loss of the val batch.
                # BUGFIX: reduce embed_out exactly as in __train();
                # validation previously fed the raw embedding tensor to the
                # MSE loss, making train and val label losses incomparable.
                loss_label = self.mse_loss(embed_out.sum(1).squeeze(), label)
                loss_heatmap = self.mse_loss(heatmap_out, heatmap, maskmap)
                loss_paf = self.mse_loss(paf_out, vecmap, maskmap)
                loss_associate = self.embeding_loss(embed_out, tagmap,
                                                    num_objects)
                loss = loss_label + loss_heatmap + loss_paf + loss_associate

                self.val_losses.update(loss.item(), inputs.size(0))
                self.val_loss_heatmap.update(loss_heatmap.item(),
                                             inputs.size(0))
                self.val_loss_associate.update(loss_associate.item(),
                                               inputs.size(0))

                # Update the vars of the val phase.
                self.batch_time.update(time.time() - start_time)
                start_time = time.time()

        self.module_utilizer.save_net(self.pose_net, metric='iters')
        # NOTE(review): val_loss_heatmap / val_loss_associate are never
        # reset, so these averages accumulate across validation runs —
        # confirm whether per-run averages were intended.
        Log.info('Loss Heatmap:{}, Loss Asso: {}'.format(
            self.val_loss_heatmap.avg, self.val_loss_associate.avg))

        # Print the log info & reset the states.
        Log.info(
            'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
            'Loss {loss.avg:.8f}\n'.format(batch_time=self.batch_time,
                                           loss=self.val_losses))
        self.batch_time.reset()
        self.val_losses.reset()
        self.pose_net.train()

    def train(self):
        """Run training epochs until solver.max_epoch is reached."""
        cudnn.benchmark = True
        while self.configer.get('epoch') < self.configer.get(
                'solver', 'max_epoch'):
            self.__train()
            if self.configer.get('epoch') == self.configer.get(
                    'solver', 'max_epoch'):
                break
class YOLOv3Test(object):
    """Inference / debug harness for a YOLOv3 object detector.

    Loads a trained detector and runs single-image or directory-level
    testing, writing raw images, box visualizations and JSON detections.
    """

    def __init__(self, configer):
        self.configer = configer
        self.blob_helper = BlobHelper(configer)
        self.det_visualizer = DetVisualizer(configer)
        self.det_parser = DetParser(configer)
        self.det_model_manager = DetModelManager(configer)
        self.det_data_loader = DetDataLoader(configer)
        self.yolo_target_generator = YOLOTargetGenerator(configer)
        self.module_utilizer = ModuleUtilizer(configer)
        self.data_transformer = DataTransformer(configer)
        self.yolo_detection_layer = YOLODetectionLayer(configer)
        # Fall back to CPU when no GPU id is configured.
        self.device = torch.device(
            'cpu' if self.configer.get('gpu') is None else 'cuda')
        self.det_net = None

        self._init_model()

    def _init_model(self):
        """Build the detector, load pretrained weights, switch to eval."""
        self.det_net = self.det_model_manager.object_detector()
        self.det_net = self.module_utilizer.load_net(self.det_net)
        self.det_net.eval()

    def __test_img(self, image_path, json_path, raw_path, vis_path):
        """Detect objects in one image; write raw/vis images and JSON.

        :return: the JSON-serialisable detection dict.
        """
        Log.info('Image Path: {}'.format(image_path))
        img = ImageHelper.read_image(
            image_path,
            tool=self.configer.get('data', 'image_tool'),
            mode=self.configer.get('data', 'input_mode'))
        ori_img_bgr = ImageHelper.get_cv2_bgr(img, mode=self.configer.get(
            'data', 'input_mode'))
        inputs = self.blob_helper.make_input(img,
                                             input_size=self.configer.get(
                                                 'data', 'input_size'),
                                             scale=1.0)

        with torch.no_grad():
            inputs = inputs.unsqueeze(0).to(self.device)
            _, _, detections = self.det_net(inputs)
            batch_detections = self.decode(detections, self.configer)
            json_dict = self.__get_info_tree(batch_detections[0], ori_img_bgr)

        image_canvas = self.det_parser.draw_bboxes(
            ori_img_bgr.copy(),
            json_dict,
            conf_threshold=self.configer.get('vis', 'conf_threshold'))
        ImageHelper.save(ori_img_bgr, raw_path)
        ImageHelper.save(image_canvas, vis_path)

        Log.info('Json Path: {}'.format(json_path))
        JsonHelper.save_file(json_dict, json_path)
        return json_dict

    @staticmethod
    def decode(batch_pred_bboxes, configer):
        """Threshold and NMS raw YOLO predictions.

        :param batch_pred_bboxes: (batch, num_boxes, 5 + num_classes) with
            rows [cx, cy, w, h, obj_conf, class scores...]; coordinates are
            presumably normalized to [0, 1] (they are clamped to that
            range) — confirm against YOLODetectionLayer.
        :return: list (len = batch) of (N, 7) tensors
            [x1, y1, x2, y2, obj_conf, class_conf, class_pred] or None.
        """
        # Convert (cx, cy, w, h) to corner form in a fresh tensor.
        box_corner = batch_pred_bboxes.new(batch_pred_bboxes.shape)
        box_corner[:, :, 0] = batch_pred_bboxes[:, :, 0] - batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 1] = batch_pred_bboxes[:, :, 1] - batch_pred_bboxes[:, :, 3] / 2
        box_corner[:, :, 2] = batch_pred_bboxes[:, :, 0] + batch_pred_bboxes[:, :, 2] / 2
        box_corner[:, :, 3] = batch_pred_bboxes[:, :, 1] + batch_pred_bboxes[:, :, 3] / 2

        # clip bounding box
        box_corner[:, :, 0::2] = box_corner[:, :, 0::2].clamp(min=0, max=1.0)
        box_corner[:, :, 1::2] = box_corner[:, :, 1::2].clamp(min=0, max=1.0)

        # NOTE: mutates the caller's tensor in place with corner coords.
        batch_pred_bboxes[:, :, :4] = box_corner[:, :, :4]
        output = [None for _ in range(len(batch_pred_bboxes))]
        for image_i, image_pred in enumerate(batch_pred_bboxes):
            # Filter out confidence scores below threshold
            conf_mask = (image_pred[:, 4] > configer.get(
                'vis', 'obj_threshold')).squeeze()
            image_pred = image_pred[conf_mask]
            # If none are remaining => process next image
            if image_pred.numel() == 0:
                continue

            # Get score and class with highest confidence
            class_conf, class_pred = torch.max(
                image_pred[:, 5:5 + configer.get('data', 'num_classes')],
                1,
                keepdim=True)

            # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
            detections = torch.cat(
                (image_pred[:, :5], class_conf.float(), class_pred.float()),
                1)

            keep_index = DetHelper.cls_nms(
                image_pred[:, :4],
                scores=image_pred[:, 4],
                labels=class_pred.squeeze(1),
                nms_threshold=configer.get('nms', 'max_threshold'),
                iou_mode=configer.get('nms', 'mode'),
                nms_mode='cython_nms')

            output[image_i] = detections[keep_index]

        return output

    def __get_info_tree(self, detections, image_raw):
        """Convert decoded detections to a JSON dict in pixel coordinates.

        Coordinates are multiplied by the raw image size, consistent with
        decode() clamping boxes to the normalized [0, 1] range.
        """
        height, width, _ = image_raw.shape
        json_dict = dict()
        object_list = list()
        if detections is not None:
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                object_dict = dict()
                xmin = x1.cpu().item() * width
                ymin = y1.cpu().item() * height
                xmax = x2.cpu().item() * width
                ymax = y2.cpu().item() * height
                object_dict['bbox'] = [xmin, ymin, xmax, ymax]
                object_dict['label'] = int(cls_pred.cpu().item())
                object_dict['score'] = float('%.2f' % conf.cpu().item())
                object_list.append(object_dict)

        json_dict['objects'] = object_list
        return json_dict

    def test(self):
        """Run single-image or directory-level detection testing.

        Exactly one of the configured 'test_img' / 'test_dir' must be set;
        results go under <project_dir>/val/results/det/<dataset>.
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'val/results/det',
                                self.configer.get('dataset'))

        test_img = self.configer.get('test_img')
        test_dir = self.configer.get('test_dir')
        # Exactly one of the two inputs must be provided.
        if test_img is None and test_dir is None:
            Log.error('test_img & test_dir not exists.')
            exit(1)

        if test_img is not None and test_dir is not None:
            Log.error('Either test_img or test_dir.')
            exit(1)

        if test_img is not None:
            base_dir = os.path.join(base_dir, 'test_img')
            filename = test_img.rstrip().split('/')[-1]
            json_path = os.path.join(
                base_dir, 'json',
                '{}.json'.format('.'.join(filename.split('.')[:-1])))
            raw_path = os.path.join(base_dir, 'raw', filename)
            vis_path = os.path.join(
                base_dir, 'vis',
                '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
            FileHelper.make_dirs(json_path, is_file=True)
            FileHelper.make_dirs(raw_path, is_file=True)
            FileHelper.make_dirs(vis_path, is_file=True)
            self.__test_img(test_img, json_path, raw_path, vis_path)
        else:
            base_dir = os.path.join(base_dir, 'test_dir',
                                    test_dir.rstrip('/').split('/')[-1])
            if not os.path.exists(base_dir):
                os.makedirs(base_dir)

            for filename in FileHelper.list_dir(test_dir):
                image_path = os.path.join(test_dir, filename)
                json_path = os.path.join(
                    base_dir, 'json',
                    '{}.json'.format('.'.join(filename.split('.')[:-1])))
                raw_path = os.path.join(base_dir, 'raw', filename)
                vis_path = os.path.join(
                    base_dir, 'vis',
                    '{}_vis.png'.format('.'.join(filename.split('.')[:-1])))
                FileHelper.make_dirs(json_path, is_file=True)
                FileHelper.make_dirs(raw_path, is_file=True)
                FileHelper.make_dirs(vis_path, is_file=True)
                self.__test_img(image_path, json_path, raw_path, vis_path)

    def debug(self):
        """Round-trip ground truth through the YOLO target/decode pipeline.

        Encodes ground-truth boxes into YOLO targets, reshapes them into
        per-scale feature-map layout, decodes them back and visualizes up
        to 20 samples (blocks on a key press per image).
        """
        base_dir = os.path.join(self.configer.get('project_dir'),
                                'vis/results/det',
                                self.configer.get('dataset'), 'debug')
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)

        count = 0
        for i, data_dict in enumerate(self.det_data_loader.get_trainloader()):
            inputs = data_dict['img']
            batch_gt_bboxes = data_dict['bboxes']
            batch_gt_labels = data_dict['labels']
            input_size = [inputs.size(3), inputs.size(2)]
            # Dummy single-channel feature maps at each configured stride;
            # only their spatial shapes are consumed downstream.
            feat_list = list()
            for stride in self.configer.get('network', 'stride_list'):
                feat_list.append(
                    torch.zeros((inputs.size(0), 1, input_size[1] // stride,
                                 input_size[0] // stride)))

            targets, _, _ = self.yolo_target_generator(feat_list,
                                                       batch_gt_bboxes,
                                                       batch_gt_labels,
                                                       input_size)
            targets = targets.to(self.device)
            anchors_list = self.configer.get('gt', 'anchors_list')
            output_list = list()
            be_c = 0
            # Slice the flat target tensor back into per-scale chunks and
            # permute each to the (batch, anchors*channels, H, W) layout
            # expected by the detection layer.
            for f_index, anchors in enumerate(anchors_list):
                feat_stride = self.configer.get('network',
                                                'stride_list')[f_index]
                fm_size = [
                    int(round(border / feat_stride)) for border in input_size
                ]
                num_c = len(anchors) * fm_size[0] * fm_size[1]
                output_list.append(
                    targets[:, be_c:be_c + num_c].contiguous().view(
                        targets.size(0), len(anchors), fm_size[1], fm_size[0],
                        -1).permute(0, 1, 4, 2, 3).contiguous().view(
                            targets.size(0), -1, fm_size[1], fm_size[0]))
                be_c += num_c

            batch_detections = self.decode(
                self.yolo_detection_layer(output_list)[2], self.configer)
            for j in range(inputs.size(0)):
                count = count + 1
                if count > 20:
                    exit(1)

                ori_img_bgr = self.blob_helper.tensor2bgr(inputs[j])
                json_dict = self.__get_info_tree(batch_detections[j],
                                                 ori_img_bgr)
                image_canvas = self.det_parser.draw_bboxes(
                    ori_img_bgr.copy(),
                    json_dict,
                    conf_threshold=self.configer.get('vis', 'obj_threshold'))
                cv2.imwrite(
                    os.path.join(base_dir, '{}_{}_vis.png'.format(i, j)),
                    image_canvas)
                cv2.imshow('main', image_canvas)
                cv2.waitKey()