def predict_single_flow_rgb_stack_3(self, flow_frame, rgb_frame, score_name,
                                    over_sample=True, frame_size=None,
                                    multiscale=None, score_name_1=None,
                                    score_name_2=None):
    """Fuse an over-sampled flow stack with the matching RGB crops along the
    channel axis and forward the result, returning the requested score blob(s)."""
    flow_1 = fast_list2arr([cv2.resize(x, frame_size) for x in flow_frame])
    flow_2 = flow_stack_oversample(
        flow_1, (self._sample_shape[2], self._sample_shape[3]))
    flow_data = flow_2 - np.float32(128.0)

    rgb_1 = [cv2.resize(x, frame_size) for x in rgb_frame]
    rgb_2 = oversample(rgb_1, (self._sample_shape[2], self._sample_shape[3]))
    # rgb_data1 = fast_list2arr(os_frame_rgb)
    # print rgb_data1.shape

    def preprocess_1(r):
        r = r.transpose(2, 0, 1)
        r[0, :, :] = r[0, :, :] - 104
        r[1, :, :] = r[1, :, :] - 117
        r[2, :, :] = r[2, :, :] - 123
        return r

    rgb_data = fast_list2arr([preprocess_1(x) for x in rgb_2])
    # print flow_data.shape
    # print rgb_data.shape
    # flow_data = np.reshape(flow_data, (10, -1, 224, 224))
    rgb_data = np.reshape(rgb_data, (10, -1, 224, 224))
    # print flow_data.shape
    # print rgb_data.shape
    # data = np.array([], dtype=rgb_data[0].dtype)
    data = np.concatenate((flow_data, rgb_data), axis=1)
    # print data.shape

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    if score_name_1 is not None and score_name_2 is not None:
        out_1 = self._net.forward(blobs=[score_name_1, ], data=data)
        out_2 = self._net.forward(blobs=[score_name_2, ], data=data)
        # print "here"
        return (out[score_name].copy(), out_1[score_name_1].copy(),
                out_2[score_name_2].copy())
    return out[score_name].copy()
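# Usage sketch (illustrative, not part of the original code): assuming `net`
# is an instance of this wrapper, `flow_stack` is the list of flow frames and
# `rgb_stack` the list of BGR frames (read with cv2) for one sampled position,
# the fused flow+RGB prediction could be invoked roughly like:
#
#     scores = net.predict_single_flow_rgb_stack_3(
#         flow_stack, rgb_stack, score_name='fc_action',
#         frame_size=(340, 256))
#
#     # With the two extra score blobs supplied, three arrays are returned:
#     # scores, scores_1, scores_2 = net.predict_single_flow_rgb_stack_3(
#     #     flow_stack, rgb_stack, 'fc_action',
#     #     score_name_1='fc_action_flow', score_name_2='fc_action_rgb',
#     #     frame_size=(340, 256))
#
# Blob names such as 'fc_action_flow' / 'fc_action_rgb' are placeholders; use
# the names defined in your deploy prototxt.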
def predict_single_c3d_rgb_stack(self, frame, score_name, over_sample=True,
                                 frame_size=None):
    if frame_size is not None:
        # print('frame size ' + str(frame_size))
        # print('x shape ' + str(frame[0].shape))
        frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
    else:
        frame = fast_list2arr(frame)

    if over_sample:
        os_frame = c3d_rgb_stack_oversample(
            frame, (self._sample_shape[3], self._sample_shape[4]))
    else:
        os_frame = fast_list2arr([frame])

    data = os_frame - np.float32([104.0, 117.0, 123.0])
    # (N, D, H, W, C) -> (N, C, D, H, W)
    data = np.transpose(data, [0, 4, 1, 2, 3])

    self._net.blobs['data'].data[...] = data.copy()
    self._net.forward()
    out = self._net.blobs[score_name].data.copy()
    return out.copy()
def extract_batch_rgb(self, frame, feature_layers, frame_size=None):
    """
    Extract features for a batch (>= 1) of (3, H, W) RGB images; a center
    crop is used.

    Typical blob dimensions:
        fc_action            dim = num_cls
        top_cls_global_pool  dim = 2048  [only for Inception V3]
        global_pool          dim = 1024  [only for BNInception]

    param:: frame: a list of cv2 arrays (numpy)
    param:: feature_layers: a list of blob names to extract
    param:: frame_size: if not None, resize each frame to this size
    return:: a list of arrays, one per blob in feature_layers
    """
    if frame_size is not None:
        frame = [cv2.resize(x, frame_size) for x in frame]
    frame = fast_list2arr(frame)
    crop_frame = image_array_center_crop(
        frame, (self._sample_shape[2], self._sample_shape[3]))
    data = fast_list2arr(
        [self._transformer.preprocess('data', x) for x in crop_frame])

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=feature_layers, data=data)
    return [
        out[feat_layer].copy().squeeze() for feat_layer in feature_layers
    ]
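# Usage sketch (illustrative, not part of the original code): assuming `net`
# is an instance of this wrapper and `frames` is a list of BGR images loaded
# with cv2.imread, batch RGB feature extraction might look like:
#
#     pooled, scores = net.extract_batch_rgb(
#         frames, feature_layers=['global_pool', 'fc_action'],
#         frame_size=(340, 256))
#     # `pooled` has dim 1024 per frame for BNInception and `scores` has
#     # dim num_cls per frame, as noted in the docstring above.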
def extract_batch_flow_stack(self, frame, feature_layers, frame_size=None):
    """
    Extract features for a batch (>= 1) of 5-pair optical-flow stacks; a
    center crop is used.

    Typical blob dimensions:
        fc_action            dim = num_cls
        top_cls_global_pool  dim = 2048  [only for Inception V3]
        global_pool          dim = 1024  [only for BNInception]

    param:: frame: a list of cv2 arrays (numpy)
    param:: feature_layers: a list of blob names to extract
    param:: frame_size: if not None, resize each frame to this size
    return:: a list of arrays, one per blob in feature_layers
    """
    if frame_size is not None:
        frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
    else:
        frame = fast_list2arr(frame)
    crop_frame = image_array_center_crop(
        frame, (self._sample_shape[2], self._sample_shape[3]))
    # Fold the flow channels back into the batch dimension; only needed for flow.
    crop_frame = crop_frame.reshape((-1, ) + self._sample_shape[1:])
    data = crop_frame - np.float32(128.0)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=feature_layers, data=data)
    return [
        out[feat_layer].copy().squeeze() for feat_layer in feature_layers
    ]
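# Usage sketch (illustrative, not part of the original code): assuming `net`
# is an instance of this wrapper and `flow_frames` is a flat list of
# single-channel flow images (5 x/y pairs per sample, as the docstring notes):
#
#     feats = net.extract_batch_flow_stack(
#         flow_frames, feature_layers=['global_pool'], frame_size=(340, 256))
#     # Each returned array is squeezed, e.g. (batch, 1024) for BNInception.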
def predict_single_flow_stack(self, frame, score_name, over_sample=True,
                              frame_size=None):
    if frame is None:
        return [0.0, 0.0]
    if frame_size is not None:
        frame = fast_list2arr([
            cv2.resize(x, frame_size, interpolation=cv2.INTER_AREA)
            for x in frame
        ])
    else:
        frame = fast_list2arr(frame)
    # Note: this variant does not oversample; the whole stack is fed as one sample.
    os_frame = fast_list2arr([frame])
    data = os_frame - np.float32(128.0)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
def predict_single_flow_stack(self, frame, score_name, over_sample=True,
                              frame_size=None):
    if frame_size is not None:
        frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
    else:
        frame = fast_list2arr(frame)

    if over_sample:
        # (4 corner + 1 center) * 2
        os_frame = flow_stack_oversample(
            frame, (self._sample_shape[2], self._sample_shape[3]))
    else:
        os_frame = fast_list2arr([frame])

    data = os_frame - np.float32(128.0)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
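# Usage sketch (illustrative, not part of the original code): assuming `net`
# wraps a flow model and `flow_stack` is the list of flow frames for one
# sampled position:
#
#     scores = net.predict_single_flow_stack(
#         flow_stack, 'fc_action', over_sample=True, frame_size=(340, 256))
#     # With over_sample=True, oversampling produces 10 crops
#     # (4 corners + center, each mirrored), so `scores` has one row per
#     # crop; averaging over axis 0 gives the snippet-level flow score.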
def predict_single_rgb_stack_memory(self, frame, score_name, over_sample=True,
                                    multiscale=None, frame_size=None,
                                    stack_len=25):
    if frame_size is not None:
        frame = [cv2.resize(x, frame_size) for x in frame]

    if over_sample:
        if multiscale is None:
            os_frame = oversample_for_rgb_stack(
                frame, (self._sample_shape[2], self._sample_shape[3]),
                stack_len)
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    def preprocess_1(r):
        r = r.transpose(2, 0, 1)
        r[0, :, :] = r[0, :, :] - 104
        r[1, :, :] = r[1, :, :] - 117
        r[2, :, :] = r[2, :, :] - 123
        return r

    data = fast_list2arr([preprocess_1(x) for x in os_frame])

    # self._net.blobs['data'].reshape(*data.shape)
    # self._net.reshape()
    #
    # out = self._net.forward(blobs=[score_name,], data=data)
    # return out[score_name].copy()

    # Process the 10 over-sampled crops one at a time (memory-friendly variant).
    data_new = data.reshape(-1, 3 * stack_len, 224, 224)
    scores_new = []
    for i in range(10):
        data_ele = data_new[i]
        self._net.blobs['data'].reshape(*data_ele.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name, ], data=data_ele)
        scores_new.append(out[score_name].copy())
    scores_new = np.array(scores_new).reshape(10, -1)
    return scores_new
def predict_single_frame(self, frame, score_name, over_sample=True,
                         multiscale=None, frame_size=None):
    if frame_size is not None:
        frame1 = fast_list2arr([x for x in frame])
        frame = [cv2.resize(x, frame_size) for x in frame]
        # print frame1.shape

    if over_sample:
        if multiscale is None:
            os_frame = oversample(
                frame, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    # print os_frame.shape
    # data = fast_list2arr([self._transformer.preprocess('data', x) for x in os_frame])
    def preprocess_1(r):
        r = r.transpose(2, 0, 1)
        r[0, :, :] = r[0, :, :] - 104
        r[1, :, :] = r[1, :, :] - 117
        r[2, :, :] = r[2, :, :] - 123
        return r

    data = fast_list2arr([preprocess_1(x) for x in os_frame])
    # print data.shape

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
def predict_single_frame(self, frame, score_name, over_sample=True,
                         multiscale=None, frame_size=None,
                         attention_name=None):
    if frame_size is not None:
        frame = [cv2.resize(x, frame_size) for x in frame]

    if over_sample:
        if multiscale is None:
            os_frame = oversample(
                frame, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    # data = fast_list2arr([self._transformer.preprocess('data', x) for x in os_frame])
    data = os_frame - [104.0, 117.0, 123.0]
    data = data.transpose(0, 3, 1, 2)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    if attention_name is None:
        out = self._net.forward(blobs=[score_name, ], data=data)
        return out[score_name].copy()
    else:
        out = self._net.forward(blobs=[score_name, attention_name], data=data)
        return out[score_name].copy(), out[attention_name].copy()
def predict_flow_stack(self, frame, score_name, over_sample=True,
                       frame_size=None):
    if frame_size is not None:
        frame = [
            cv2.resize(x.transpose(1, 2, 0), frame_size) for x in frame
        ]
        frame = [x.transpose(2, 0, 1) for x in frame]

    if over_sample:
        os_frame = flow_stack_mirrorsample(
            frame, (self._sample_shape[2], self._sample_shape[3]))
    else:
        os_frame = fast_list2arr(frame)

    data = os_frame - np.float32(128.0)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
def predict_single_frame(self, frame, score_name, over_sample=True,
                         multiscale=None, frame_size=None, multicrop=True):
    if frame_size is not None:
        frame = [cv2.resize(x, frame_size) for x in frame]

    if over_sample:
        if multiscale is None:
            if multicrop:
                os_frame = oversample(
                    frame, (self._sample_shape[2], self._sample_shape[3]))
            else:
                os_frame = mirrorsample(
                    frame, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    data = fast_list2arr(
        [self._transformer.preprocess('data', x) for x in os_frame])

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
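# Usage sketch (illustrative, not part of the original code): assuming `net`
# wraps an RGB model, a single sampled frame can be scored with the standard
# 10-crop oversampling, mirror-only sampling, or multi-scale testing:
#
#     scores = net.predict_single_frame([img], 'fc_action',
#                                       frame_size=(340, 256))
#     scores = net.predict_single_frame([img], 'fc_action',
#                                       frame_size=(340, 256), multicrop=False)
#     scores = net.predict_single_frame([img], 'fc_action',
#                                       multiscale=[1.0, 1.5, 2.0],
#                                       frame_size=(340, 256))
#
# The multiscale values above are placeholders; each entry is interpreted as
# a downscale factor (the frame is resized by 1/scale before cropping).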
def predict_single_frame(self, frame, score_name, over_sample=True,
                         multiscale=None, frame_size=None):
    if frame_size is not None:
        frame = [
            cv2.resize(x, frame_size, interpolation=cv2.INTER_AREA)
            for x in frame
        ]
    os_frame = fast_list2arr(frame)
    data = fast_list2arr(
        [self._transformer.preprocess('data', x) for x in os_frame])

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
def predict_single_flow_stack_feature_map(self, frame, score_name,
                                          over_sample=False, frame_size=None,
                                          blobname='conv1/7x7_s2', dim=30):
    if frame_size is not None:
        frame = fast_list2arr([cv2.resize(x, frame_size) for x in frame])
    else:
        frame = fast_list2arr(frame)
    print "frame", frame.shape

    if over_sample:
        os_frame = flow_stack_oversample(
            frame, (self._sample_shape[2], self._sample_shape[3]))
    else:
        os_frame = fast_list2arr([frame])
    print "os_frame", os_frame.shape
    # (10, 256, 340)
    # (10, 10, 224, 224)

    data = os_frame - np.float32(128.0)
    print data.shape
    # self._net.blobs['data'].reshape(*data.shape)
    print self._net.blobs['data'].data[0].shape
    self._net.blobs['data'].data[...] = data
    # self._net.reshape()
    out = self._net.forward()  # data=data

    # Return the first `dim` channels of the requested feature map.
    feat = self._net.blobs[blobname].data[0, :dim]
    return feat.copy()
def predict_single_flow_stack_test_memory(self, frame, score_name,
                                          over_sample=True, multiscale=None,
                                          frame_size=None, stack_len=25):
    if over_sample:
        if multiscale is None:
            os_frame = oversample_for_flow_stack_test(
                frame, (self._sample_shape[2], self._sample_shape[3]),
                stack_len)
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    os_frame = np.array(os_frame).transpose(0, 3, 1, 2)
    data = os_frame - np.float32(128.0)

    # self._net.blobs['data'].reshape(*data.shape)
    # self._net.reshape()
    # out = self._net.forward(blobs=[score_name,], data=data)
    # return out[score_name].copy()

    # Process the 10 over-sampled crops one at a time (memory-friendly variant).
    data_new = data.reshape(-1, 10 * stack_len, 224, 224)
    scores_new = []
    for i in range(10):
        data_ele = data_new[i]
        self._net.blobs['data'].reshape(*data_ele.shape)
        self._net.reshape()
        out = self._net.forward(blobs=[score_name, ], data=data_ele)
        scores_new.append(out[score_name].copy())
    scores_new = np.array(scores_new).reshape(10, -1)
    return scores_new
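# Usage sketch (illustrative, not part of the original code): the
# memory-friendly variant above returns one score row per oversampled crop,
# so a 25-frame flow stack could be evaluated roughly like:
#
#     scores = net.predict_single_flow_stack_test_memory(
#         flow_frames, 'fc_action', stack_len=25)
#     # `scores` is reshaped to (10, -1), one row per crop; averaging over
#     # axis 0 gives the aggregated flow score.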
def predict_single_flow_stack_test(self, frame, score_name, over_sample=True,
                                   multiscale=None, frame_size=None,
                                   stack_len=25):
    if over_sample:
        if multiscale is None:
            os_frame = oversample_for_flow_stack_test(
                frame, (self._sample_shape[2], self._sample_shape[3]),
                stack_len)
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    os_frame = np.array(os_frame).transpose(0, 3, 1, 2)
    data = os_frame - np.float32(128.0)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    return out[score_name].copy()
def predict_single_frame_with_attention(self, frame, score_name, joints,
                                        over_sample=True, multiscale=None,
                                        frame_size=None):
    # TODO: uncomment the following to visualize
    # img_id = random.randint(0, 1000)
    # cv2.imwrite('visualize/{}_ori_img.jpg'.format(img_id), frame[0])

    if frame_size is not None:
        frame = [cv2.resize(x, frame_size) for x in frame]

    pose_map = np.zeros(frame_size, dtype='float32')
    scale_x = pose_map.shape[0] / 255.  # row
    scale_y = pose_map.shape[1] / 255.  # col
    pose_map = [
        np.expand_dims(generateLimb(pose_map, joints, scale_x, scale_y),
                       axis=2),
    ]

    # TODO: uncomment the following to visualize
    # cv2.imwrite('visualize/{}_ori_img.jpg'.format(img_id), frame[0])
    # cv2.imwrite('visualize/{}_ori_pose.jpg'.format(img_id), pose_map[0])
    # img_grey_ori = cv2.cvtColor(frame[0], cv2.COLOR_BGRA2GRAY)
    # pose_concat = np.tile(pose_map[0].astype('uint8'), 3)
    # pose_squeezed = pose_map[0].astype('uint8').squeeze(axis=2)
    # pose_color_map = cv2.applyColorMap(pose_concat, cv2.COLORMAP_JET)
    # img_merge_ori = cv2.addWeighted(frame[0], 0.5, pose_color_map, 0.5, 0)
    # cv2.imwrite('visualize/{}_ori_weighted.jpg'.format(img_id), img_merge_ori)

    if over_sample:
        if multiscale is None:
            os_frame = oversample(
                frame, (self._sample_shape[2], self._sample_shape[3]))
            os_pose_map = oversample(
                pose_map, (self._sample_shape[2], self._sample_shape[3]))
        else:
            os_frame = []
            os_pose_map = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                resized_pose_map = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in pose_map
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
                os_pose_map.extend(
                    oversample(
                        resized_pose_map,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)
        os_pose_map = fast_list2arr(pose_map)

    # TODO: uncomment the following to visualize
    # for i in xrange(os_frame.shape[0]):
    #     img_to_show_ = os_frame[i, :, :, :].squeeze()
    #     pose_to_show_ = os_pose_map[i, :, :, :].squeeze()
    #
    #     img_grey_ori_ = cv2.cvtColor(img_to_show_, cv2.COLOR_BGRA2GRAY).astype('uint8')
    #     pose_squeezed_ = pose_to_show_.astype('uint8')
    #     img_merge_ori = cv2.addWeighted(img_grey_ori_, 0.5, pose_squeezed_, 0.5, 0)
    #     cv2.imwrite('visualize/{}_{}_weighted.jpg'.format(img_id, i), img_merge_ori)
    #     cv2.imwrite('visualize/{}_{}_img.jpg'.format(img_id, i), img_to_show_)
    #     cv2.imwrite('visualize/{}_{}_pose.jpg'.format(img_id, i), pose_to_show_)

    # Append the pose map as a 4th input channel.
    raw_data = np.append(os_frame, os_pose_map, axis=3)
    #####################################################################
    data = fast_list2arr(
        [self._transformer.preprocess('data', x) for x in raw_data])

    # TODO: uncomment the following to visualize
    # for i in xrange(os_frame.shape[0]):
    #     img_to_show = data[i, :3, :, :].squeeze().transpose(1, 2, 0)
    #     pose_to_show = data[i, 3, :, :].squeeze()
    #     img_to_show[:, :, 0] += 104
    #     img_to_show[:, :, 1] += 117
    #     img_to_show[:, :, 2] += 123
    #
    #     print img_to_show.shape
    #     print pose_to_show.shape
    #     img_grey_ori = cv2.cvtColor(img_to_show, cv2.COLOR_BGRA2GRAY).astype('uint8')
    #     pose_squeezed = pose_to_show.astype('uint8')
    #     img_merge_ori = cv2.addWeighted(img_grey_ori, 0.5, pose_squeezed, 0.5, 0)
    #     cv2.imwrite('visualize/{}_{}_weighted_post.jpg'.format(img_id, i), img_merge_ori)
    #     cv2.imwrite('visualize/{}_{}_img_post.jpg'.format(img_id, i), img_to_show)
    #     cv2.imwrite('visualize/{}_{}_pose_post.jpg'.format(img_id, i), pose_to_show)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    print out[score_name].max()
    return out[score_name].copy()
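# Usage sketch (illustrative, not part of the original code): assuming `net`
# wraps a 4-channel (BGR + pose map) model and `joints` holds the detected
# keypoints expected by generateLimb, the attention-style prediction could be
# invoked roughly like:
#
#     scores = net.predict_single_frame_with_attention(
#         [img], 'fc_action', joints, frame_size=(340, 256))
#     # The pose map is appended as a 4th input channel before the usual
#     # oversampling and mean subtraction.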
def predict_single_frame_with_roi(self, frame, score_name, joints,
                                  over_sample=True, multiscale=None,
                                  frame_size=None):
    # TODO: uncomment the following to visualize
    # img_id = random.randint(0, 1000)
    # cv2.imwrite('visualize/{}_ori_img.jpg'.format(img_id), frame[0])

    assert isinstance(frame_size, tuple)
    frame = [cv2.resize(x, frame_size) for x in frame]

    use_roi = False
    scale_x = frame_size[0] / 336.  # row
    scale_y = frame_size[1] / 256.  # col
    if joints:
        roi_top_w, roi_top_h, roi_w, roi_h = generateROI(
            joints, [0, 13, 14, 15, 16, 17], scale_x, scale_y, 40, 40)
        if roi_h > 40 and roi_w > 40:
            use_roi = True

    # TODO: uncomment the following to visualize
    # cv2.imwrite('visualize/{}_ori_img.jpg'.format(img_id), frame[0])
    # cv2.imwrite('visualize/{}_ori_pose.jpg'.format(img_id), pose_map[0])
    # img_grey_ori = cv2.cvtColor(frame[0], cv2.COLOR_BGRA2GRAY)
    # pose_concat = np.tile(pose_map[0].astype('uint8'), 3)
    # pose_squeezed = pose_map[0].astype('uint8').squeeze(axis=2)
    # pose_color_map = cv2.applyColorMap(pose_concat, cv2.COLORMAP_JET)
    # img_merge_ori = cv2.addWeighted(frame[0], 0.5, pose_color_map, 0.5, 0)
    # cv2.imwrite('visualize/{}_ori_weighted.jpg'.format(img_id), img_merge_ori)

    if over_sample:
        if multiscale is None and not use_roi:
            os_frame = oversample(
                frame, (self._sample_shape[2], self._sample_shape[3]))
        elif use_roi:
            os_frame = []
            roi_mult_list = np.arange(2., 3., 0.1).tolist()
            for roi_mult in roi_mult_list:
                roi_top_w, roi_top_h, roi_w, roi_h = generateROI(
                    joints, [0, 13, 14, 15, 16, 17], scale_x, scale_y, 40,
                    40, roi_mult)
                target_size = (self._sample_shape[2], self._sample_shape[3])
                resized_roi = [
                    cv2.resize(
                        x[roi_top_h:roi_h + roi_top_h,
                          roi_top_w:roi_w + roi_top_w], target_size)
                    for x in frame
                ]
                os_frame.extend(resized_roi)
        else:
            os_frame = []
            for scale in multiscale:
                resized_frame = [
                    cv2.resize(x, (0, 0), fx=1.0 / scale, fy=1.0 / scale)
                    for x in frame
                ]
                os_frame.extend(
                    oversample(
                        resized_frame,
                        (self._sample_shape[2], self._sample_shape[3])))
    else:
        os_frame = fast_list2arr(frame)

    # TODO: uncomment the following to visualize
    # for i in xrange(len(os_frame)):
    #     img_to_show_ = os_frame[i].squeeze()
    #     cv2.imwrite('visualize/{}_{}_img.jpg'.format(img_id, i), img_to_show_)
    #     pose_to_show_ = os_pose_map[i, :, :, :].squeeze()
    #
    #     img_grey_ori_ = cv2.cvtColor(img_to_show_, cv2.COLOR_BGRA2GRAY).astype('uint8')
    #     pose_squeezed_ = pose_to_show_.astype('uint8')
    #     img_merge_ori = cv2.addWeighted(img_grey_ori_, 0.5, pose_squeezed_, 0.5, 0)
    #     cv2.imwrite('visualize/{}_{}_weighted.jpg'.format(img_id, i), img_merge_ori)
    #     cv2.imwrite('visualize/{}_{}_img.jpg'.format(img_id, i), img_to_show_)
    #     cv2.imwrite('visualize/{}_{}_pose.jpg'.format(img_id, i), pose_to_show_)

    # raw_data = np.append(os_frame, os_pose_map, axis=3)
    #####################################################################
    data = fast_list2arr(
        [self._transformer.preprocess('data', x) for x in os_frame])

    # TODO: uncomment the following to visualize
    # for i in xrange(os_frame.shape[0]):
    #     img_to_show = data[i, :3, :, :].squeeze().transpose(1, 2, 0)
    #     pose_to_show = data[i, 3, :, :].squeeze()
    #     img_to_show[:, :, 0] += 104
    #     img_to_show[:, :, 1] += 117
    #     img_to_show[:, :, 2] += 123
    #
    #     print img_to_show.shape
    #     print pose_to_show.shape
    #     img_grey_ori = cv2.cvtColor(img_to_show, cv2.COLOR_BGRA2GRAY).astype('uint8')
    #     pose_squeezed = pose_to_show.astype('uint8')
    #     img_merge_ori = cv2.addWeighted(img_grey_ori, 0.5, pose_squeezed, 0.5, 0)
    #     cv2.imwrite('visualize/{}_{}_weighted_post.jpg'.format(img_id, i), img_merge_ori)
    #     cv2.imwrite('visualize/{}_{}_img_post.jpg'.format(img_id, i), img_to_show)
    #     cv2.imwrite('visualize/{}_{}_pose_post.jpg'.format(img_id, i), pose_to_show)

    self._net.blobs['data'].reshape(*data.shape)
    self._net.reshape()
    out = self._net.forward(blobs=[score_name, ], data=data)
    # print np.argmax(out[score_name], axis=1)
    # TODO: check wrong samples
    return out[score_name].copy()