def get_best_rect(self, rects_list, rects_xyz_list, rects_center):
    """Select the most plausible rectangle among the detected candidates.

    :param rects_list: list of rectangles, each given by its 4 image points
    :param rects_xyz_list: the 4 xyz points of each rectangle
    :param rects_center: the center point of each rectangle
    :return: (rect, rect_xyz, rect_center) of the best candidate, or
             (None, None, None) when the list is empty
    """
    if len(rects_list) == 1:
        return rects_list[0], rects_xyz_list[0], rects_center[0]
    elif len(rects_list) == 0:
        return None, None, None
    else:
        # More than one candidate: keep the one whose mean edge length is
        # closest to half of the expected target length.
        if DEBUG_FLAG:
            print("出现了多个矩形的情况,他们的矩形情况为:")
            print(rects_list)
            print(rects_xyz_list)
        correct = self.error
        correct_i = 0
        for i, middle_xyzs in enumerate(rects_xyz_list):
            distance1 = tools.get_distance(middle_xyzs[0], middle_xyzs[3])
            distance2 = tools.get_distance(middle_xyzs[1], middle_xyzs[2])
            temp = abs((distance1 + distance2) / 2 - self.target_legnth / 2) - self.error
            if temp < correct:
                # BUG FIX: track the running minimum. Previously `correct`
                # was never updated inside the loop, so the LAST candidate
                # under the threshold won instead of the best-matching one.
                correct = temp
                correct_i = i
        return rects_list[correct_i], rects_xyz_list[correct_i], rects_center[correct_i]
def get_neighbors(nodes, node_id, node_type, d, auv_x, auv_y, auv_z, energy):
    """Choose a forwarding neighbor for `node_id` within range `d`.

    The first in-range node whose type is non-zero is returned immediately.
    Otherwise the in-range type-0 node with the highest
    energy / distance-to-AUV weight is returned, or -1 when no node is
    within range.
    """
    src_x = nodes[0][node_id]
    src_y = nodes[1][node_id]
    src_z = nodes[2][node_id]
    best_weight = 0
    best_id = -1
    for idx in range(len(nodes[0])):
        # Skip nodes outside communication range of the source node.
        if tools.get_distance(src_x, src_y, src_z,
                              nodes[0][idx], nodes[1][idx], nodes[2][idx]) > d:
            continue
        if node_type[idx] != 0:
            # Any non-ordinary node in range wins outright.
            return idx
        weight = energy[idx] / tools.get_distance(
            nodes[0][idx], nodes[1][idx], nodes[2][idx], auv_x, auv_y, auv_z)
        if weight > best_weight:
            best_weight = weight
            best_id = idx
    return best_id
def process_rect(self, station_rect, station_xyz):
    """Process the resource-island rectangle.

    Derives the rectangle's center (pixel and xyz) and the rotation angle
    implied by the depth difference across the top edge. Speed is handled
    elsewhere.

    :param station_rect: the rectangle's four corner points (x, y pairs,
                         not two points with four values)
    :param station_xyz: the xyz coordinates of those four corners
    :return: (center, center_xyz, xita) where xita is in degrees
    """
    leftdown, leftup, rightup, rightdown = station_rect  # corners are (x, y)
    leftdown_xyz, leftup_xyz, rightup_xyz, rightdown_xyz = station_xyz
    # Center pixel: x from the midpoint of the top edge, y from the
    # midpoint of the left edge.
    left_center = tools.get_middle(leftdown, leftup)
    up_center = tools.get_middle(leftup, rightup)
    # if DEBUG_FLAG:
    #     print("左边中心:", left_center, "右边中心为:", up_center)
    center = (up_center[0], left_center[1])
    center_xyz = self.detectStation.camera.get_xyz(center)
    # Rotation angle: asin of the z offset between the two top corners
    # over the top-edge length, converted to degrees.
    x1, y1, z1 = leftup_xyz
    x2, y2, z2 = rightup_xyz
    distance = tools.get_distance(leftup_xyz, rightup_xyz)
    xita = math.degrees(math.asin((z1 - z2) / distance))
    return center, center_xyz, xita
def select_from_point(self):
    """Toggle selection for every unit under the initial click point."""
    for unit in self.get_unit_list():
        # A unit is hit when the click lies within its radius.
        if tools.get_distance(self.init_xy, (unit.x, unit.y)) > unit.radius:
            continue
        if not unit.is_selected():
            self.to_select.append(unit)
        elif self.keys[key.LSHIFT] and unit.is_selected():
            # Shift-click on an already-selected unit deselects it.
            self.to_select.remove(unit)
    self.run_selection()
def check_conditions(self):
    """State-transition check while a unit is moving.

    :return: "idleing" when the unit is within 4 radii of its destination,
             "movecommand" when it has idled past its waiting period,
             None otherwise (stay in the current state).
    """
    if tools.get_distance(self.unit.current_destination,
                          self.unit.get_location()) < self.unit.radius*4:
        return "idleing"
    if self.idle_time >= self.waiting_period:
        # Parenthesized print is identical on Py2 and also valid on Py3
        # (the rest of the file already uses print(...)).
        print("trying again")
        return "movecommand"
    return None
def check_x_correct(self, middle_xyzs):
    """Check whether the centre rectangle's xyz points match the target.

    :param middle_xyzs: xyz values of the centre rectangle's corners
    :return: True when the mean of the two diagonal edge lengths is within
             self.error of half the target length
    """
    distance1 = tools.get_distance(middle_xyzs[0], middle_xyzs[3])
    distance2 = tools.get_distance(middle_xyzs[1], middle_xyzs[2])
    X_temp = (distance1 + distance2) / 2
    deviation = abs(X_temp - self.target_legnth / 2)
    X_correct = deviation < self.error
    if DEBUG_FLAG:
        print("正确的X_temp为:", X_temp, "期望的距离为:", self.target_legnth / 2)
        print("检测到的rect的X_temp为:", X_temp, "期望的距离为:", self.target_legnth / 2)
    return X_correct
def check_conditions(self):
    """State-transition check while a unit is moving.

    :return: "idleing" when the unit is within 4 radii of its destination,
             "movecommand" when it has idled past its waiting period,
             None otherwise (stay in the current state).
    """
    if tools.get_distance(self.unit.current_destination,
                          self.unit.get_location()) < self.unit.radius * 4:
        return "idleing"
    if self.idle_time >= self.waiting_period:
        # Parenthesized print is identical on Py2 and also valid on Py3
        # (the rest of the file already uses print(...)).
        print("trying again")
        return "movecommand"
    return None
def judge_node_type(nodes, node_type, phi, rho, omega, v_h, r, d, h, flag):
    """Classify every node by how its data can reach the sink.

    node_type[i] = 2 : within safe distance d of the sink (direct delivery)
    node_type[i] = 1 : the AUV passes within d of the node at some time t
    Entries that match neither case are left untouched.
    """
    number = len(nodes[0])
    for i in range(number):
        # Direct-delivery node: within communication range of the sink at
        # the origin. (Strictly this should be <= r; the safe distance d is
        # used instead for simplicity.)
        if tools.get_distance(nodes[0][i], nodes[1][i], nodes[2][i],
                              0, 0, 0) <= d:
            node_type[i] = 2
            continue
        # Solving the differential equation analytically is too messy, so
        # brute-force over integer time steps of the AUV trajectory.
        t_max = h / v_h
        for t in range(int(t_max)):
            auv_x, auv_y, auv_z = auv_position.get_auv_position_at_time_t(
                t, phi, rho, omega, v_h, h, flag)
            distance = tools.get_distance(nodes[0][i], nodes[1][i],
                                          nodes[2][i], auv_x, auv_y, auv_z)
            if distance <= d:
                # The AUV comes within range at time t — same outcome as
                # tracking the minimum distance, without scanning the rest.
                node_type[i] = 1
                break
def collides(unit, other):
    """Collision test between two moving entities.

    Circle/circle: returns the post-move centre distance when the circles
    overlap, else False. Rect/circle and circle/rect: returns True/False
    from axis-overlap tests, but falls through to an implicit None when the
    first axis test fails or neither shape pairing matches (callers treat
    None as falsy).
    """
    # Positions after applying each entity's velocity for this tick.
    x1, y1 = unit.x + unit.dx, unit.y + unit.dy
    x2, y2 = other.x + other.dx, other.y + other.dy
    # Removed an unused `distance_old` computation of the pre-move distance.
    distance = tools.get_distance((x1, y1), (x2, y2))
    if unit.shape == "circle" and other.shape == "circle":
        if distance <= unit.radius + other.radius:
            # NOTE(review): a dead-centre overlap returns 0, which is falsy —
            # a caller doing a truthiness test would read it as "no
            # collision". Confirm whether callers compare against False.
            return distance
        else:
            return False
    if unit.shape == "rectangle" and other.shape == "circle":
        if other.x + other.radius > unit.left and other.x - other.radius < unit.right:
            if other.y + other.radius > unit.bottom and other.y - other.radius < unit.top:
                return True
            else:
                return False
    if unit.shape == "circle" and other.shape == "rectangle":
        if unit.x > other.left and unit.x < other.right:
            if unit.y > other.bottom and unit.y < other.top:
                return True
            else:
                return False
def check_conditions(self):
    """Leash check: order the unit back when it strays too far.

    :return: "movecommand" when the unit is beyond its alert range from its
             leash point (the destination is reset to the leash point),
             None otherwise.
    """
    ranging_distance = tools.get_distance(self.unit.leash_point,
                                          self.unit.get_location())
    if ranging_distance > self.unit.alert_range:
        # Parenthesized print is identical on Py2 and also valid on Py3
        # (the rest of the file already uses print(...)).
        print("too far!")
        self.unit.current_destination = self.unit.leash_point
        return "movecommand"
    # hostile_unit = self.unit.controller.get_close_entity(self.unit.get_location())
    # if not hostile_unit:
    #     self.unit.target = None
    #     print "target lost"
    #     return "idleing"
    return None
# Locate the 3D-Kinect joints CSV for this video/direction and screen every
# frame for physically implausible joint depths ("bad frames").
files=os.listdir('testVedios/test'+nb_video+'/')
print(direction[1:])
# Pick the joints CSV matching the walking direction; the trailing [^2]
# excludes the "...2" variant of the filename.
for i in files:
    if (len(re.findall('joints_3DKinect_.'+direction[1:]+'[^2]', i))!=0):
        filecm=i
print(filecm)
k=pd.DataFrame(pd.read_csv('testVedios/test'+nb_video+'/'+filecm))
kk=k  # NOTE(review): kk aliases k (no copy) — writes to k are visible via kk
# find wrong points
badpoints=0
for i in range(len(k['frames'])):
    # Left-right depth differences at the ankles (ak) and knees (kn).
    akdis=k['z_lak'][i]-k['z_rak'][i]
    kndis=k['z_lkn'][i]-k['z_rkn'][i]
    # Knee-to-ankle segment length for each leg.
    l_kn_ak=tools.get_distance((k['x_lkn'][i], k['y_lkn'][i], k['z_lkn'][i]),
                               (k['x_lak'][i], k['y_lak'][i], k['z_lak'][i]))
    r_kn_ak=tools.get_distance((k['x_rkn'][i], k['y_rkn'][i], k['z_rkn'][i]),
                               (k['x_rak'][i], k['y_rak'][i], k['z_rak'][i]))
    if i==20:
        # Debug probe of a single frame.
        print(i,akdis, kndis)
    # Thresholds below are empirical; units presumably millimetres — TODO
    # confirm against the capture pipeline.
    # bad_frame = 10: right knee angle negative, or ankles nearly level
    # (akdis^2 <= 400) with a knee-depth gap in a suspicious band.
    if k['kangle_r'][i]<0 or (akdis**2<=400 and kndis<100 and kndis>-20) or (akdis**2<=400 and kndis>220):
        k.loc[i,['bad_frame']]=10
        badpoints+=1
        print(i, kndis)
    # bad_frame = 11: the mirrored test for the left side.
    elif k['kangle_l'][i]<0 or (akdis**2<=400 and kndis<-205) or (akdis**2<=400 and kndis>-100 and kndis<10):
        k.loc[i,['bad_frame']]=11
        badpoints+=1
        print(i,kndis)
    ###!!!!!!! zuihao zuo you jiao fen kai left: 0 right: 1
    # (pinyin above: "best to separate left and right feet; left: 0 right: 1")
    # bad_frame = 20: shoulder depth gap too large.
    elif (k['z_las'][i]-k['z_ras'][i])>80:
        k.loc[i,['bad_frame']]=20
        badpoints+=1
def get_close_entity(self, location, e_range=100):
    """Return the first unit found within e_range of location, or None.

    Note: this is the first unit in iteration order, not necessarily the
    closest one.
    """
    return next(
        (entity for entity in self.get_unit_list()
         if tools.get_distance(location, (entity.x, entity.y)) < e_range),
        None,
    )
def get_distance_from_target(self):
    """Distance from the unit's position to its current destination."""
    position = (self.unit.x, self.unit.y)
    destination = (self.unit.current_destination[0],
                   self.unit.current_destination[1])
    return tools.get_distance(position, destination)
def show_angle3d(nb_vedio, frames):
    """Lift LCR-NET 2D joints to 3D via Kinect depth, plot knee angles, save CSVs.

    Reads the pose JSON (*C.json) and the depth video (*D.mp4) for test
    video `nb_vedio`, lifts the 13 LCR-NET joints of every frame in
    [frames[0], frames[1]) to 3D, computes left/right knee angles and a
    step-phase label per frame, plots the angles, and writes the 3D/2D
    joint tables to CSV ("back" suffix when frames[0] > 250, else "front").

    :param nb_vedio: video id string used inside the testVedios path
    :param frames: (start_frame, end_frame) pair
    """
    files = os.listdir('testVedios/test' + nb_vedio + '/')
    # Find the pose JSON and the depth video among the test files.
    for i in files:
        if (len(re.findall('.*C\.json', i)) != 0):
            filejson = i
        if (len(re.findall('.*D.mp4', i)) != 0):
            filedepth = i
            number = filedepth[:-5]
        #if (len(re.findall('_distance_data.csv', i))!=0):
        #    filedis=i
    print(number)
    #print(filedis)
    print(filejson)
    #disdata=pd.DataFrame(pd.read_csv('testVedios/test'+nb_vedio+'/'+filedis))
    fig = plt.figure()
    ax = fig.add_subplot(211)
    ax2 = fig.add_subplot(212)
    ax.set_title('3d kinect LCR-NET_Improved')
    ax.set_xlabel('Frame')
    ax.set_ylabel('Angle_left')
    ax2.set_xlabel('Frame')
    ax2.set_ylabel('Angle_right')
    with open('testVedios/test' + nb_vedio + '/' + filejson) as json_data:
        d = json.load(json_data)
    v = cv2.VideoCapture('testVedios/test' + nb_vedio + '/' + filedepth)
    anglesl = []
    anglesr = []
    # Per-joint coordinate series for the 13 LCR-NET joints:
    # 3D (x, y, z), 2D colour-frame, and 2D depth-frame coordinates.
    points3dx = [[] for p in range(13)]
    points3dy = [[] for p in range(13)]
    points3dz = [[] for p in range(13)]
    points2dx = [[] for p in range(13)]
    points2dy = [[] for p in range(13)]
    points2ddx = [[] for p in range(13)]
    points2ddy = [[] for p in range(13)]
    akdis = []  # per-frame right-left ankle depth difference
    kndis = []  # per-frame right-left knee depth difference
    step = []   # per-frame step-phase label (0-3, or -1 when unclassified)
    bad_frame = [0] * (frames[1] - frames[0])
    l_kn_ak = []  # per-frame left knee-to-ankle length
    r_kn_ak = []  # per-frame right knee-to-ankle length
    for t in tqdm(range(frames[0], frames[1])):
        #print(t)
        v.set(cv2.CAP_PROP_POS_FRAMES, t)
        ret, im = v.read()
        # NOTE(review): img_m and font are computed but never used below.
        img_m = cv2.medianBlur(im, 7)
        font = cv2.FONT_HERSHEY_SIMPLEX
        '''
        for i in range(800,980,2):
            for j in range(300,860,5):
                a=RGBto3D((i,j),im)
                x.append(a[0])
                y.append(a[1])
                z.append(a[2])
            print(i)
        #ax.scatter(x,z,y)
        '''
        if len(d['frames'][t]) != 0:
            # pose2d stores 13 x-values followed by 13 y-values.
            xjoints2d = d['frames'][t][0]['pose2d'][:13]
            yjoints2d = d['frames'][t][0]['pose2d'][13:]
            xjoints3d = []
            yjoints3d = []
            zjoints3d = []
            xjoints2dd = []
            yjoints2dd = []
            ###!!!!!!!!!11111!!!!
            leg = ['z_rak', 'z_lak', 'z_rkn', 'z_lkn', 'z_ras', 'z_las']
            for i in range(13):
                if i <= -5:
                    # Dead branch: i ranges over 0..12, so this never runs.
                    # It also references `disdata`, whose load is commented
                    # out above — presumably a deliberately disabled path
                    # that read leg depths from a CSV; TODO confirm.
                    z3d = float(disdata[leg[i]][disdata['frame'] == t])
                    print(z3d)
                    joints = tools.RGBto3D((xjoints2d[i], yjoints2d[i], z3d), im, t)
                    xjoints3d.append(joints[0])
                    yjoints3d.append(joints[1])
                    zjoints3d.append(joints[2])
                else:
                    # Lift the colour-frame joint to 3D using the depth frame.
                    joints = tools.RGBto3D((xjoints2d[i], yjoints2d[i]), im, t, True, 7)
                    xjoints3d.append(joints[0])
                    yjoints3d.append(joints[1])
                    zjoints3d.append(joints[2])
                # Map the colour-frame joint into depth-image coordinates.
                joints2dd = tools.RGBtoD((xjoints2d[i], yjoints2d[i]), t)
                xjoints2dd.append(joints2dd[0])
                yjoints2dd.append(joints2dd[1])
            for p in range(13):
                points3dx[p].append(xjoints3d[p])
                points3dy[p].append(yjoints3d[p])
                points3dz[p].append(zjoints3d[p])
                points2dx[p].append(xjoints2d[p])
                points2dy[p].append(yjoints2d[p])
                points2ddx[p].append(xjoints2dd[p])
                points2ddy[p].append(yjoints2dd[p])
            # Knee angles from joint triples: left uses joints 1/3/5,
            # right uses 0/2/4 (per the `j` name order defined below).
            anglesl.append(180 - tools.angle((xjoints3d[1], yjoints3d[1], zjoints3d[1]),
                                             (xjoints3d[3], yjoints3d[3], zjoints3d[3]),
                                             (xjoints3d[5], yjoints3d[5], zjoints3d[5]), False))
            anglesr.append(180 - tools.angle((xjoints3d[0], yjoints3d[0], zjoints3d[0]),
                                             (xjoints3d[2], yjoints3d[2], zjoints3d[2]),
                                             (xjoints3d[4], yjoints3d[4], zjoints3d[4]), False))
            # static analysis
            # save the important information: dis between ankles, as height, leg length
            l_kn_ak.append(
                tools.get_distance((xjoints3d[1], yjoints3d[1], zjoints3d[1]),
                                   (xjoints3d[3], yjoints3d[3], zjoints3d[3])))
            r_kn_ak.append(
                tools.get_distance((xjoints3d[0], yjoints3d[0], zjoints3d[0]),
                                   (xjoints3d[2], yjoints3d[2], zjoints3d[2])))
            # Depth differences: ankles (joints 0/1) and knees (joints 2/3).
            dak = (zjoints3d[0] - zjoints3d[1])
            dkn = (zjoints3d[2] - zjoints3d[3])
            akdis.append(dak)
            kndis.append(dkn)
            # dynamic analysis
            # (pinyin: compare with the preceding frames)
            # Model : step automatic by Vicon !!!!front back!!!!!
            # condition 1 2 3
            # dis>0 : left joint is in front, dis<0 : right joint is in front
            if dkn <= 0 and dak > -100:
                step.append(0)
            elif dak <= -100 and dkn < 0:
                step.append(1)
            elif dkn >= 0 and dak < 100:
                step.append(2)
            elif dak >= 100 and dkn > 0:
                step.append(3)
            else:
                step.append(-1)
            # find the wrong frame
            # +++++++++++
            # correct the wrong ponts
            # +++++++++++
            # angle 2d
            # anglesl.append(180-tools.angle((yjoints[1],zjoints[1]),(yjoints[3],zjoints[3]),(yjoints[5],zjoints[5])))
            # anglesr.append(180-tools.angle((yjoints[0],zjoints[0]),(yjoints[2],zjoints[2]),(yjoints[4],zjoints[4])))
            '''
            print(xjoints)
            print(yjoints)
            print(zjoints)
            ax2.scatter(xjoints, zjoints, yjoints, color='r')
            '''
    v.release()
    #b, a = signal.butter(8, 0.3, 'lowpass')
    #anglesl = signal.filtfilt(b, a, anglesl)
    #anglesr = signal.filtfilt(b, a, anglesr)
    frames = range(frames[0], frames[1])
    ax.plot(frames, anglesl)
    ax2.plot(frames, anglesr, color='r')  #, marker='.')
    # NOTE(review): the scratch snippet below references an undefined `y1`;
    # reconstructed here as commented-out demo code — confirm against the
    # original source.
    #for i in range(50): y1.append(i)  # append i to y1 each iteration and plot
    #ax.cla()  # clear the axes
    #ax.bar(y1, label='test', height=y1, width=0.3)
    #ax.legend()
    #plt.pause(0.1)
    data2d = {}
    data = {}
    data2dd = {}
    data2dd['frames'] = frames
    data2d['frames'] = frames
    data['frames'] = frames
    data['kangle_l'] = anglesl
    data['kangle_r'] = anglesr
    data['akdis'] = akdis
    data['kndis'] = kndis
    data['l_kn_ak'] = l_kn_ak
    data['r_kn_ak'] = r_kn_ak
    data['bad_frame'] = bad_frame
    print(len(step))
    data['step'] = step
    # Column-name order for the 13 LCR-NET joints.
    j = [
        'rak', 'lak', 'rkn', 'lkn', 'ras', 'las', 'rwr', 'lwr', 'rel', 'lel',
        'rsh', 'lsh', 'head'
    ]
    for p in range(13):
        data['x_' + j[p]] = points3dx[p]
        data['y_' + j[p]] = points3dy[p]
        data['z_' + j[p]] = points3dz[p]
        data2d['x_' + j[p]] = points2dx[p]
        data2d['y_' + j[p]] = points2dy[p]
        data2dd['x_' + j[p]] = points2ddx[p]
        data2dd['y_' + j[p]] = points2ddy[p]
    data = pd.DataFrame(data)
    data2d = pd.DataFrame(data2d)
    data2dd = pd.DataFrame(data2dd)
    # Segments starting after frame 250 are saved as the "back" view.
    if frames[0] > 250:
        data.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                    '_LCR-NET_joints_3DKinect_back.csv', encoding='gbk')
        data2d.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                      '_LCR-NET_joints_2DColor_back.csv', encoding='gbk')
        data2dd.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                       '_LCR-NET_joints_2DDepth_back.csv', encoding='gbk')
        #data1=pd.DataFrame({'frame':frames, 'angle_left':anglesl, 'angle_right':anglesr})
        #data1.to_csv('testVedios'+'/test'+nb_vedio+'/'+number+'_LCR-NET_angles_3DKinect_back.csv',encoding='gbk')
    else:
        data2dd.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                       '_LCR-NET_joints_2DDepth_front.csv', encoding='gbk')
        data2d.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                      '_LCR-NET_joints_2DColor_front.csv', encoding='gbk')
        data.to_csv('testVedios' + '/test' + nb_vedio + '/' + number +
                    '_LCR-NET_joints_3DKinect_front.csv', encoding='gbk')
def show_angle3d(nb_video, frames):
    """Lift PifPaf 2D joints to 3D via Kinect depth, plot knee angles, save CSVs.

    PifPaf/COCO (17-joint) variant of the LCR-NET routine: leg-joint depths
    are taken from a previously exported front-view 3D joints CSV, the other
    joints are lifted directly from the depth video. Knee angles are plotted
    and the joint tables written to CSV ("back" suffix when
    frames[0] > 250, else "front").

    :param nb_video: video id string used inside the testVideos path
    :param frames: (start_frame, end_frame) pair
    """
    files=os.listdir('testVideos/test'+nb_video+'/')
    # Find the pose JSON (*C.json), depth video (*D.mp4) and the previously
    # exported front-view 3D joints CSV.
    for i in files:
        if (len(re.findall('.*C\.json', i))!=0):
            filejson=i
        if (len(re.findall('.*D.mp4', i))!=0):
            filedepth=i
            number=filedepth[:-5]
        if (len(re.findall('_PifPaf_joints_3DKinect_front.csv', i))!=0):
            filedis=i
    print(number)
    print(filejson)
    print(filedis)
    print('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_3DKinect_back.csv')
    disdata=pd.DataFrame(pd.read_csv('testVideos/test'+nb_video+'/'+filedis))
    fig=plt.figure()
    ax=fig.add_subplot(211)
    ax2=fig.add_subplot(212)
    ax.set_title('3d kinect PifPaf_Original')
    ax.set_xlabel('Frame')
    ax.set_ylabel('Angle_left')
    ax2.set_xlabel('Frame')
    ax2.set_ylabel('Angle_right')
    with open('testVideos/test'+nb_video+'/'+filejson) as json_data:
        d = json.load(json_data)
    v=cv2.VideoCapture('testVideos/test'+nb_video+'/'+filedepth)
    anglesl=[]
    anglesr=[]
    # Per-joint coordinate series for the 17 COCO joints:
    # 3D (x, y, z), 2D colour-frame, and 2D depth-frame coordinates.
    points3dx=[[] for p in range(17)]
    points3dy=[[] for p in range(17)]
    points3dz=[[] for p in range(17)]
    points2dx=[[] for p in range(17)]
    points2dy=[[] for p in range(17)]
    points2ddx=[[] for p in range(17)]
    points2ddy=[[] for p in range(17)]
    akdis=[]  # per-frame ankle depth difference (see NOTE on indices below)
    kndis=[]  # per-frame knee depth difference (see NOTE on indices below)
    step=[]
    bad_frame=[0]*(frames[1]-frames[0])
    l_kn_ak=[]
    r_kn_ak=[]
    for t in tqdm(range(frames[0],frames[1])):
        #print(t)
        v.set(cv2.CAP_PROP_POS_FRAMES, t)
        ret, im=v.read()
        # NOTE(review): img_m and font are computed but never used below.
        img_m=cv2.medianBlur(im,5)
        font=cv2.FONT_HERSHEY_SIMPLEX
        '''
        for i in range(800,980,2):
            for j in range(300,860,5):
                a=RGBto3D((i,j),im)
                x.append(a[0])
                y.append(a[1])
                z.append(a[2])
            print(i)
        #ax.scatter(x,z,y)
        '''
        if len(d['frames'][t])!=0:
            # pose2d stores 17 x-values followed by 17 y-values.
            xjoints2d=d['frames'][t][0]['pose2d'][:17]
            yjoints2d=d['frames'][t][0]['pose2d'][17:]
            zjoints3d=[]
            xjoints3d=[]
            yjoints3d=[]
            xjoints2dd=[]
            yjoints2dd=[]
            # Leg joints (indices 11..16) reuse the z values saved in the
            # front-view CSV: leg[i-11] maps 11->z_las .. 16->z_rak.
            leg=['z_las','z_ras','z_lkn','z_rkn','z_lak','z_rak']
            for i in range(17):
                if i in [11,13,15,12,14,16]:
                    z3d=float(disdata[leg[i-11]][disdata['frames']==t])
                    print(z3d)
                    joints=tools.RGBto3D((xjoints2d[i], yjoints2d[i], z3d), im, t)
                    xjoints3d.append(joints[0])
                    yjoints3d.append(joints[1])
                    zjoints3d.append(joints[2])
                else:
                    # Lift the colour-frame joint to 3D using the depth frame.
                    joints=tools.RGBto3D((xjoints2d[i], yjoints2d[i]), im, t, True, 7)
                    xjoints3d.append(joints[0])
                    yjoints3d.append(joints[1])
                    zjoints3d.append(joints[2])
                # Map the colour-frame joint into depth-image coordinates.
                joints2dd=tools.RGBtoD((xjoints2d[i], yjoints2d[i]), t)
                xjoints2dd.append(joints2dd[0])
                yjoints2dd.append(joints2dd[1])
            # Knee angles: left from joints 11/13/15 (las/lkn/lak),
            # right from 12/14/16 (ras/rkn/rak), per the `j` order below.
            anglesl.append(180-tools.angle((xjoints3d[11],yjoints3d[11],zjoints3d[11]),(xjoints3d[13],yjoints3d[13],zjoints3d[13]),(xjoints3d[15],yjoints3d[15],zjoints3d[15]),False))
            anglesr.append(180-tools.angle((xjoints3d[12],yjoints3d[12],zjoints3d[12]),(xjoints3d[14],yjoints3d[14],zjoints3d[14]),(xjoints3d[16],yjoints3d[16],zjoints3d[16]),False))
            # static analysis
            # save the important information: dis between ankles, as height, leg length
            # NOTE(review): per the joint order in `j` below, indices 9/10
            # are wrists and 12/13 are ras/lkn — these do not look like
            # knee-to-ankle pairs, and dak/dkn below mix the same indices.
            # Confirm the intended joints before relying on these columns.
            l_kn_ak.append(tools.get_distance((xjoints3d[12],yjoints3d[12],zjoints3d[12]),(xjoints3d[13],yjoints3d[13],zjoints3d[13])))
            r_kn_ak.append(tools.get_distance((xjoints3d[9],yjoints3d[9],zjoints3d[9]),(xjoints3d[10],yjoints3d[10],zjoints3d[10])))
            dak=(zjoints3d[10]-zjoints3d[13])
            dkn=(zjoints3d[9]-zjoints3d[12])
            akdis.append(dak)
            kndis.append(dkn)
            for p in range(17):
                points3dx[p].append(xjoints3d[p])
                points3dy[p].append(yjoints3d[p])
                points3dz[p].append(zjoints3d[p])
                points2dx[p].append(xjoints2d[p])
                points2dy[p].append(yjoints2d[p])
                points2ddx[p].append(xjoints2dd[p])
                points2ddy[p].append(yjoints2dd[p])
    v.release()
    #b, a = signal.butter(8, 0.3, 'lowpass')
    #anglesl = signal.filtfilt(b, a, anglesl)
    #anglesr = signal.filtfilt(b, a, anglesr)
    frames=range(frames[0],frames[1])
    ax.plot(frames, anglesl)
    ax2.plot(frames, anglesr, color='r')
    data2d={}
    data={}
    data2dd={}
    data2dd['frames']=frames
    data2d['frames']=frames
    data['frames']=frames
    data['kangle_l']=anglesl
    data['kangle_r']=anglesr
    data['akdis']=akdis
    data['kndis']=kndis
    data['l_kn_ak']=l_kn_ak
    data['r_kn_ak']=r_kn_ak
    data['bad_frame']=bad_frame
    # COCO joint order used for the CSV column names.
    j=['head','leye','reye','lear','rear','lsh','rsh','lel','rel','lwr','rwr','las','ras','lkn','rkn','lak','rak']
    for p in range(17):
        data['x_'+j[p]]=points3dx[p]
        data['y_'+j[p]]=points3dy[p]
        data['z_'+j[p]]=points3dz[p]
        data2d['x_'+j[p]]=points2dx[p]
        data2d['y_'+j[p]]=points2dy[p]
        data2dd['x_'+j[p]]=points2ddx[p]
        data2dd['y_'+j[p]]=points2ddy[p]
    data=pd.DataFrame(data)
    data2d=pd.DataFrame(data2d)
    data2dd=pd.DataFrame(data2dd)
    # Segments starting after frame 250 are saved as the "back" view.
    if frames[0]>250:
        data.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_3DKinect_back.csv',encoding='gbk')
        data2d.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_2DColor_back.csv',encoding='gbk')
        data2dd.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_2DDepth_back.csv',encoding='gbk')
        #data1=pd.DataFrame({'frame':frames, 'angle_left':anglesl, 'angle_right':anglesr})
        #data1.to_csv('testVedios'+'/test'+nb_vedio+'/'+number+'_LCR-NET_angles_3DKinect_back.csv',encoding='gbk')
    else:
        data2dd.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_2DDepth_front.csv',encoding='gbk')
        data2d.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_2DColor_front.csv',encoding='gbk')
        data.to_csv('testVideos'+'/test'+nb_video+'/'+number+'_PifPaf_joints_3DKinect_front.csv',encoding='gbk')
def get_distance_from_target(self):
    """Return the distance between the unit and its current destination."""
    dest = self.unit.current_destination
    return tools.get_distance((self.unit.x, self.unit.y), (dest[0], dest[1]))