def __call__(self, oriImg):
    scale_search = [0.5, 1.0, 1.5, 2.0]
    boxsize = 368
    stride = 8
    padValue = 128
    thre = 0.0  # peak threshold; originally 0.05
    multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))

    for m in range(len(multiplier)):
        scale = multiplier[m]
        imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale,
                                 interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
        # (H, W, C) -> (1, C, H, W), scaled to roughly [-0.5, 0.5]
        im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                          (3, 2, 0, 1)) / 256 - 0.5
        im = np.ascontiguousarray(im)

        data = torch.from_numpy(im).float()
        if torch.cuda.is_available():
            data = data.cuda()
        with torch.no_grad():
            output = self.model(data).cpu().numpy()

        # extract outputs, resize, and remove padding
        heatmap = np.transpose(np.squeeze(output), (1, 2, 0))
        heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride,
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2],
                          :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

        heatmap_avg += heatmap / len(multiplier)

    all_peaks = []
    all_peaks_value = []
    for part in range(21):  # 21 hand keypoint channels
        map_ori = heatmap_avg[:, :, part]
        one_heatmap = gaussian_filter(map_ori, sigma=3)
        binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
        # all values below threshold: no peak for this part
        if np.sum(binary) == 0:
            all_peaks.append([0, 0])
            all_peaks_value.append([0.0])
            continue
        # keep only the connected component with the largest total response
        label_img, label_numbers = label(binary, return_num=True,
                                         connectivity=binary.ndim)
        max_index = np.argmax([np.sum(map_ori[label_img == i])
                               for i in range(1, label_numbers + 1)]) + 1
        label_img[label_img != max_index] = 0
        map_ori[label_img == 0] = 0

        y, x = util.npmax(map_ori)
        all_peaks.append([x, y])
        # confidence of the peak
        value = map_ori[y, x]
        all_peaks_value.append([value])

    return np.array(all_peaks), np.array(all_peaks_value)
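# Usage sketch: `Hand` is assumed to be the class wrapping the __call__
# above, and the checkpoint path is hypothetical; neither is confirmed by
# this file. Peaks of (0, 0) with score 0.0 mean no response cleared thre.
if __name__ == "__main__":
    import cv2

    hand_estimator = Hand("model/hand_pose_model.pth")  # hypothetical path
    img = cv2.imread("images/demo.jpg")
    peaks, peak_values = hand_estimator(img)  # (21, 2) x/y and (21, 1) scores
    for idx, ((x, y), (v,)) in enumerate(zip(peaks, peak_values)):
        print(f"part {idx}: ({x}, {y}) score={v:.3f}")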
torchmodel = torchnet.cpu()
model_dict = util.transfer(torchmodel, torch.load(torchmodelfile))
torchmodel.load_state_dict(model_dict)
torchmodel.eval()

frame = cv.imread("images/demo.jpg")
frameWidth = frame.shape[1]
frameHeight = frame.shape[0]

stride = 8
padValue = 128
imageToTest = cv.resize(frame, (368, 368))
imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
# (H, W, C) -> (1, C, H, W), scaled to roughly [-0.5, 0.5]
im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                  (3, 2, 0, 1)) / 256 - 0.5
im = np.ascontiguousarray(im)

data = torch.from_numpy(im).float()
if torch.cuda.is_available():
    data = data.cuda()
with torch.no_grad():
    Mconv7_stage6_L1, Mconv7_stage6_L2 = torchmodel(data)
Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()  # PAFs
Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()  # heatmaps

heatmap2 = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))
out = heatmap2
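# A minimal, self-contained sketch of the layout change used above: an
# (H, W, C) uint8 image becomes a (1, C, H, W) float array scaled to
# roughly [-0.5, 0.5]. The dummy image below is illustrative only.
import numpy as np

dummy = np.random.randint(0, 256, (368, 368, 3), dtype=np.uint8)
x = np.transpose(np.float32(dummy[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
assert x.shape == (1, 3, 368, 368)        # batch, channels, height, width
assert x.min() >= -0.5 and x.max() < 0.5  # 0 -> -0.5, 255 -> ~0.496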
def __call__(self, oriImg):
    if self.multi_scale:
        scale_search = [0.5, 1.0, 1.5, 2.0]
    else:
        scale_search = [0.5]
    boxsize = 368
    stride = 8
    padValue = 128
    thre1 = 0.1
    thre2 = 0.05
    multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
    heatmap_list = []
    heatmap_list_converted_list = []

    for m in range(len(multiplier)):
        scale = multiplier[m]
        imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale,
                                 interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(
            imageToTest, stride, padValue)
        # (H, W, C) -> (1, C, H, W), scaled to roughly [-0.5, 0.5]
        im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]),
                          (3, 2, 0, 1)) / 256 - 0.5
        im = np.ascontiguousarray(im)

        data = torch.from_numpy(im).float()
        if torch.cuda.is_available():
            data = data.cuda()
        with torch.no_grad():
            # the model returns PAFs (L1), heatmaps (L2), and a per-stage
            # heatmap list; note this rebinds heatmap_list on every scale
            Mconv7_stage6_L1, Mconv7_stage6_L2, heatmap_list = self.model(data)
        Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
        Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()

        # extract outputs, resize, and remove padding
        heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0))  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride,
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2],
                          :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

        heatmap_list_converted = to_heatmaps(heatmap_list, stride,
                                             imageToTest_padded, pad, oriImg)
        heatmap_list_converted_list.append(heatmap_list_converted)

        paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0))  # output 0 is PAFs
        paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride,
                         interpolation=cv2.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] - pad[2],
                  :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]),
                         interpolation=cv2.INTER_CUBIC)

        # average over scales (the original `heatmap_avg += heatmap_avg + ...`
        # double-counted earlier scales)
        heatmap_avg += heatmap / len(multiplier)
        paf_avg += paf / len(multiplier)
        heatmap_list.append(heatmap)

    all_peaks = []
    peak_counter = 0
    for part in range(18):
        map_ori = heatmap_avg[:, :, part]
        one_heatmap = gaussian_filter(map_ori, sigma=3)

        # shift the smoothed map one pixel in each direction so local
        # maxima can be found by elementwise comparison
        map_left = np.zeros(one_heatmap.shape)
        map_left[1:, :] = one_heatmap[:-1, :]
        map_right = np.zeros(one_heatmap.shape)
        map_right[:-1, :] = one_heatmap[1:, :]
        map_up = np.zeros(one_heatmap.shape)
        map_up[:, 1:] = one_heatmap[:, :-1]
        map_down = np.zeros(one_heatmap.shape)
        map_down[:, :-1] = one_heatmap[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (one_heatmap >= map_left, one_heatmap >= map_right,
             one_heatmap >= map_up, one_heatmap >= map_down,
             one_heatmap > thre1))
        peaks = list(
            zip(np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
        peak_id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [
            peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    # find connections in the specified sequence; center 29 is in position 15
    limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9],
               [9, 10], [10, 11], [2, 12], [12, 13], [13, 14], [2, 1],
               [1, 15], [15, 17], [1, 16], [16, 18], [3, 17], [6, 18]]
    # the middle joints heatmap correspondence
    mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44],
              [19, 20], [21, 22], [23, 24], [25, 26], [27, 28], [29, 30],
              [47, 48], [49, 50], [53, 54], [51, 52], [55, 56], [37, 38],
              [45, 46]]

    connection_all = []
    special_k = []
    mid_num = 10

    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if nA != 0 and nB != 0:
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case when two body parts overlap
                    if norm == 0:
                        continue
                    vec = np.divide(vec, norm)

                    # sample the PAF at mid_num points along the candidate limb
                    startend = list(
                        zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                    vec_x = np.array(
                        [score_mid[int(round(startend[I][1])),
                                   int(round(startend[I][0])), 0]
                         for I in range(len(startend))])
                    vec_y = np.array(
                        [score_mid[int(round(startend[I][1])),
                                   int(round(startend[I][0])), 1]
                         for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                    score_with_dist_prior = sum(score_midpts) / len(score_midpts) + \
                        min(0.5 * oriImg.shape[0] / (norm + 1e-5) - 1, 0)
                    criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i, j, score_with_dist_prior,
                            score_with_dist_prior + candA[i][2] + candB[j][2]
                        ])

            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if i not in connection[:, 3] and j not in connection[:, 4]:
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    if len(connection) >= min(nA, nB):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):
                    if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if subset[j][indexB] != partBs[i]:
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # otherwise, treat like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if no partA was found in the subset, create a new one
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \
                        connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # delete rows of subset with too few parts
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
    # candidate: x, y, score, id
    return candidate, subset, heatmap_list, heatmap_list_converted_list
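# Usage sketch: `Body` is assumed to be the class wrapping the __call__
# above; the constructor argument is hypothetical. Each row of `subset`
# is one person: columns 0-17 index into `candidate` (-1 if the part is
# missing), column 18 is the total score, column 19 the part count.
if __name__ == "__main__":
    import cv2

    body_estimator = Body("model/body_pose_model.pth")  # hypothetical path
    candidate, subset, _, _ = body_estimator(cv2.imread("images/demo.jpg"))
    for n, person in enumerate(subset):
        for part in range(18):
            idx = int(person[part])
            if idx == -1:
                continue  # this part was not detected for this person
            x, y, score = candidate[idx][0:3]
            print(f"person {n}, part {part}: ({x:.0f}, {y:.0f}) score={score:.3f}")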
def process(oriImg, model, params, model_params):
    canvas = np.copy(oriImg)
    multiplier = [
        x * model_params['boxsize'] / oriImg.shape[0]
        for x in params['scale_search']
    ]
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
    paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))

    for m in range(len(multiplier)):
        scale = multiplier[m]
        imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale,
                                 interpolation=cv2.INTER_CUBIC)
        imageToTest_padded, pad = util.padRightDownCorner(
            imageToTest, model_params['stride'], model_params['padValue'])
        # required shape (1, height, width, channels)
        input_img = np.transpose(
            np.float32(imageToTest_padded[:, :, :, np.newaxis]),
            (3, 0, 1, 2))

        output_blobs = model.predict(input_img)

        # extract outputs, resize, and remove padding
        heatmap = np.squeeze(output_blobs[1])  # output 1 is heatmaps
        heatmap = cv2.resize(heatmap, (0, 0),
                             fx=model_params['stride'],
                             fy=model_params['stride'],
                             interpolation=cv2.INTER_CUBIC)
        heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2],
                          :imageToTest_padded.shape[1] - pad[3], :]
        heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]),
                             interpolation=cv2.INTER_CUBIC)

        paf = np.squeeze(output_blobs[0])  # output 0 is PAFs
        paf = cv2.resize(paf, (0, 0),
                         fx=model_params['stride'],
                         fy=model_params['stride'],
                         interpolation=cv2.INTER_CUBIC)
        paf = paf[:imageToTest_padded.shape[0] - pad[2],
                  :imageToTest_padded.shape[1] - pad[3], :]
        paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]),
                         interpolation=cv2.INTER_CUBIC)

        heatmap_avg = heatmap_avg + heatmap / len(multiplier)
        paf_avg = paf_avg + paf / len(multiplier)

    all_peaks = []
    peak_counter = 0
    for part in range(18):
        map_ori = heatmap_avg[:, :, part]
        one_heatmap = gaussian_filter(map_ori, sigma=3)

        # shift the smoothed map one pixel in each direction so local
        # maxima can be found by elementwise comparison
        map_left = np.zeros(one_heatmap.shape)
        map_left[1:, :] = one_heatmap[:-1, :]
        map_right = np.zeros(one_heatmap.shape)
        map_right[:-1, :] = one_heatmap[1:, :]
        map_up = np.zeros(one_heatmap.shape)
        map_up[:, 1:] = one_heatmap[:, :-1]
        map_down = np.zeros(one_heatmap.shape)
        map_down[:, :-1] = one_heatmap[:, 1:]

        peaks_binary = np.logical_and.reduce(
            (one_heatmap >= map_left, one_heatmap >= map_right,
             one_heatmap >= map_up, one_heatmap >= map_down,
             one_heatmap > params['thre1']))
        peaks = list(
            zip(np.nonzero(peaks_binary)[1],
                np.nonzero(peaks_binary)[0]))  # note reverse
        peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
        peak_id = range(peak_counter, peak_counter + len(peaks))
        peaks_with_score_and_id = [
            peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))
        ]

        all_peaks.append(peaks_with_score_and_id)
        peak_counter += len(peaks)

    connection_all = []
    special_k = []
    mid_num = 10

    for k in range(len(mapIdx)):
        score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
        candA = all_peaks[limbSeq[k][0] - 1]
        candB = all_peaks[limbSeq[k][1] - 1]
        nA = len(candA)
        nB = len(candB)
        indexA, indexB = limbSeq[k]
        if nA != 0 and nB != 0:
            connection_candidate = []
            for i in range(nA):
                for j in range(nB):
                    vec = np.subtract(candB[j][:2], candA[i][:2])
                    norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
                    # failure case when two body parts overlap
                    if norm == 0:
                        continue
                    vec = np.divide(vec, norm)

                    # sample the PAF at mid_num points along the candidate limb
                    startend = list(
                        zip(np.linspace(candA[i][0], candB[j][0], num=mid_num),
                            np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                    vec_x = np.array(
                        [score_mid[int(round(startend[I][1])),
                                   int(round(startend[I][0])), 0]
                         for I in range(len(startend))])
                    vec_y = np.array(
                        [score_mid[int(round(startend[I][1])),
                                   int(round(startend[I][0])), 1]
                         for I in range(len(startend))])

                    score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                    score_with_dist_prior = sum(score_midpts) / len(score_midpts) + \
                        min(0.5 * oriImg.shape[0] / norm - 1, 0)
                    criterion1 = len(np.nonzero(score_midpts > params['thre2'])[0]) > 0.8 * len(score_midpts)
                    criterion2 = score_with_dist_prior > 0
                    if criterion1 and criterion2:
                        connection_candidate.append([
                            i, j, score_with_dist_prior,
                            score_with_dist_prior + candA[i][2] + candB[j][2]
                        ])

            connection_candidate = sorted(connection_candidate,
                                          key=lambda x: x[2],
                                          reverse=True)
            connection = np.zeros((0, 5))
            for c in range(len(connection_candidate)):
                i, j, s = connection_candidate[c][0:3]
                if i not in connection[:, 3] and j not in connection[:, 4]:
                    connection = np.vstack(
                        [connection, [candA[i][3], candB[j][3], s, i, j]])
                    if len(connection) >= min(nA, nB):
                        break

            connection_all.append(connection)
        else:
            special_k.append(k)
            connection_all.append([])

    # last number in each row is the total parts number of that person
    # the second last number in each row is the score of the overall configuration
    subset = -1 * np.ones((0, 20))
    candidate = np.array([item for sublist in all_peaks for item in sublist])

    for k in range(len(mapIdx)):
        if k not in special_k:
            partAs = connection_all[k][:, 0]
            partBs = connection_all[k][:, 1]
            indexA, indexB = np.array(limbSeq[k]) - 1

            for i in range(len(connection_all[k])):
                found = 0
                subset_idx = [-1, -1]
                for j in range(len(subset)):
                    if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                        subset_idx[found] = j
                        found += 1

                if found == 1:
                    j = subset_idx[0]
                    if subset[j][indexB] != partBs[i]:
                        subset[j][indexB] = partBs[i]
                        subset[j][-1] += 1
                        subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                elif found == 2:  # if found 2 and disjoint, merge them
                    j1, j2 = subset_idx
                    membership = ((subset[j1] >= 0).astype(int) +
                                  (subset[j2] >= 0).astype(int))[:-2]
                    if len(np.nonzero(membership == 2)[0]) == 0:  # merge
                        subset[j1][:-2] += (subset[j2][:-2] + 1)
                        subset[j1][-2:] += subset[j2][-2:]
                        subset[j1][-2] += connection_all[k][i][2]
                        subset = np.delete(subset, j2, 0)
                    else:  # otherwise, treat like found == 1
                        subset[j1][indexB] = partBs[i]
                        subset[j1][-1] += 1
                        subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                # if no partA was found in the subset, create a new one
                elif not found and k < 17:
                    row = -1 * np.ones(20)
                    row[indexA] = partAs[i]
                    row[indexB] = partBs[i]
                    row[-1] = 2
                    row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + \
                        connection_all[k][i][2]
                    subset = np.vstack([subset, row])

    # delete rows of subset with too few parts
    deleteIdx = []
    for i in range(len(subset)):
        if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
            deleteIdx.append(i)
    subset = np.delete(subset, deleteIdx, axis=0)

    # draw detected keypoints (B, G, R order)
    for i in range(18):
        for j in range(len(all_peaks[i])):
            cv2.circle(canvas, all_peaks[i][j][0:2], 3, colors[i], thickness=-1)
            cv2.putText(canvas, str(i), all_peaks[i][j][0:2], 0, 5e-3 * 100,
                        (0, 0, 0), 1)

    # classify the pose quadrant and annotate the canvas
    pose_result = getJudge(all_peaks)
    if pose_result == 1:
        print('Upper left')
        cv2.putText(canvas, 'Upper left', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1,
                    [0, 255, 0], 1)
    elif pose_result == 2:
        print('Upper right')
        cv2.putText(canvas, 'Upper right', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1,
                    [0, 255, 0], 1)
    elif pose_result == 3:
        print('Bottom right')
        cv2.putText(canvas, 'Bottom right', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1,
                    [0, 255, 0], 1)
    elif pose_result == 4:
        print('Bottom left')
        cv2.putText(canvas, 'Bottom left', (20, 20), cv2.FONT_HERSHEY_PLAIN, 1,
                    [0, 255, 0], 1)

    # draw limbs as semi-transparent ellipses between connected parts
    stickwidth = 2
    for i in range(17):
        for n in range(len(subset)):
            index = subset[n][np.array(limbSeq[i]) - 1]
            if -1 in index:
                continue
            cur_canvas = canvas.copy()
            Y = candidate[index.astype(int), 0]
            X = candidate[index.astype(int), 1]
            mX = np.mean(X)
            mY = np.mean(Y)
            length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
            angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
            polygon = cv2.ellipse2Poly((int(mY), int(mX)),
                                       (int(length / 2), stickwidth),
                                       int(angle), 0, 360, 1)
            cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
            canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

    return canvas
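# Usage sketch: the config keys below are the ones `process` reads; the
# values are common defaults, and loading the Keras model this way is an
# assumption (keras.models.load_model is standard, the .h5 path is not).
if __name__ == "__main__":
    import cv2
    from keras.models import load_model

    params = {'scale_search': [1.0], 'thre1': 0.1, 'thre2': 0.05}
    model_params = {'boxsize': 368, 'stride': 8, 'padValue': 128}
    model = load_model("model/keras_openpose.h5")  # hypothetical weight file
    canvas = process(cv2.imread("images/demo.jpg"), model, params, model_params)
    cv2.imwrite("result.png", canvas)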