def intersectingEdges(self, x1y1, x2y2, points):
    """For each edge formed by `points`, yield the intersection with the
    line segment `(x1, y1) - (x2, y2)`, if it exists. Also return the
    distance of `(x2, y2)` to the middle of the edge along with its index,
    so that the closest one can be chosen."""
    x1, y1 = x1y1
    x2, y2 = x2y2
    for i in range(4):
        x3, y3 = points[i]
        x4, y4 = points[(i + 1) % 4]
        denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
        nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
        nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
        if denom == 0:
            # This covers two cases:
            #   nua == nub == 0: coincident
            #   otherwise:       parallel
            continue
        ua, ub = nua / denom, nub / denom
        if 0 <= ua <= 1 and 0 <= ub <= 1:
            x = x1 + ua * (x2 - x1)
            y = y1 + ua * (y2 - y1)
            m = QPointF((x3 + x4) / 2, (y3 + y4) / 2)
            d = distance(m - QPointF(x2, y2))
            yield d, i, (x, y)
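# This method (and closeEnough / nearestVertex later in this section) assumes
# a free `distance` helper that returns the Euclidean length of a QPointF
# treated as a vector. A minimal sketch of that assumption:
import math


def distance(p):
    # Euclidean norm of a QPointF difference, e.g. distance(p1 - p2).
    return math.sqrt(p.x() ** 2 + p.y() ** 2)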
def explode(self, x, y, r):
    positions = get_border_positions(x, 50)
    for x in positions:
        for py in range(max(y - r, 0), min(y + r, MAP_H)):
            for px in range(max(x - r, 0), min(x + r, MAP_W)):
                if distance(x, y, px, py) >= r:
                    continue
                self.collision_map[py][px] = False
def analyzePose():
    global cur_x
    global cur_y
    global cur_theta
    global flag_arrive_last_checkpoint
    global flag_in_returning
    while running_flag.isSet():
        if pose_queue is None:
            rospy.loginfo('analyzePose: main process exited, exiting')
            return
        if pose_queue.empty():
            continue
        pose_record = pose_queue.get()
        pose_pos, pose_time = pose_record[0], pose_record[1]
        # convert to x, y, angle
        cur_x = pose_pos.position.x
        cur_y = pose_pos.position.y
        rot_q = pose_pos.orientation
        (_, _, cur_theta) = euler_from_quaternion(
            [rot_q.x, rot_q.y, rot_q.z, rot_q.w])
        # convert from radians to degrees
        cur_theta = radiou2dgree(cur_theta)
        # rospy.loginfo("current position: x-{}, y-{}, theta-{}".format(cur_x, cur_y, cur_theta))

        # put into post_pose_queue for uploading;
        # the value at index [0] indicates: 0 -- pos record, 1 -- event record
        post_pose_queue.put(
            (0, cur_x, cur_y, cur_theta, pose_time.isoformat("T")))

        # the robot has not arrived at a waypoint, or has already left one
        if robot_status['route_point_no'] is None or len(
                robot_status['leave']) > 0:
            continue

        # tell whether the robot has left the current waypoint
        if (pose_time - robot_status['enter'][1]).total_seconds() > config.Holding_Time or \
                distance(robot_status['holding_pos'], (cur_x, cur_y, cur_theta)) > config.Valid_Range_Radius:
            lock.acquire()
            robot_status['leave'] = (cur_theta, pose_time)
            rospy.loginfo(
                'analyzePose: found waypoint leave time, the record of the current waypoint is:\n{}'
                .format(robot_status))
            post_pose_queue.put((1, robot_status['route_point_no'],
                                 robot_status['enter'][1].isoformat("T"),
                                 robot_status['leave'][1].isoformat("T")))
            if flag_arrive_last_checkpoint:
                rospy.loginfo(
                    'set in-returning flag when leaving the last checkpoint')
                flag_in_returning = True
            resetRbotStatus()
            lock.release()
            continue
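# `radiou2dgree` (name as used above) is assumed to be a plain
# radians-to-degrees conversion; a minimal sketch of that assumption:
import math


def radiou2dgree(theta):
    # Convert an angle in radians to degrees.
    return theta * 180.0 / math.pi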
def _calc_damage(self, x, y, data):
    positions = get_border_positions(x, 50)
    dmg = []
    for x in positions:
        r = data['r']   # bomb radius
        r2 = 8          # tank radius
        dist = distance(x, y, data['x'], data['y']) + r2
        if dist > r * 2:
            dmg.append(0)
            continue
        # Full damage within r * 0.4 of the blast centre, then 1/dist falloff.
        dmg.append(min(100, 100 * (r * .4 / dist)))
    # Only the strongest hit among the wrapped positions counts.
    return max(dmg)
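# A quick sanity check of the falloff above (numbers hypothetical): with a
# bomb radius r = 50, damage is capped at 100 out to dist = r * 0.4 = 20 and
# then decays as 1/dist until the dist > r * 2 cutoff zeroes it.
assert min(100, 100 * (50 * .4 / 20)) == 100  # point blank: capped at 100
assert min(100, 100 * (50 * .4 / 100)) == 20  # at dist 100: 20 damage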
def explode(self, x, y, r):
    positions = get_border_positions(x, 50)
    for x in positions:
        for py in range(max(y - r, 0), min(y + r, MAP_H)):
            for px in range(max(x - r, 0), min(x + r, MAP_W)):
                if distance(x, y, px, py) >= r:
                    continue
                self.collision_map[py][px] = False
                # Zero the alpha byte of the RGBA pixel so the crater
                # becomes transparent.
                self.map_pixels[(py * MAP_W + px) * 4 + 3] = b'\x00'
    # Rebuild the texture once, after the whole crater has been carved.
    self.map_img = sf.Image.from_pixels(MAP_W, MAP_H,
                                        b''.join(self.map_pixels))
    self.map_tex = sf.Texture.from_image(self.map_img)
    self.map_sprite.texture = self.map_tex
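# `get_border_positions` is not shown in this snippet; from its call sites it
# is assumed to yield the blast x plus any wrapped copies within `margin`
# pixels of the map border, so craters near one edge reappear on the other.
# A sketch of that assumption for a horizontally wrapping map:
def get_border_positions(x, margin):
    positions = [x]
    if x < margin:
        positions.append(x + MAP_W)  # wrapped copy past the right edge
    elif x > MAP_W - margin:
        positions.append(x - MAP_W)  # wrapped copy past the left edge
    return positions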
def k_nearest(self, cantidad_vecinos, vector, id):
    '''
    1. Push nodes onto the stack in visiting order, comparing the cut
       dimension to decide left or right.
    2. Check whether a subtree can still contain close neighbours; for
       that, the distance along the cut dimension is computed.
    '''
    if cantidad_vecinos > self.size:
        raise Exception(
            "More neighbours requested than there are nodes in the tree")
    nodes = list()
    nodes.append(self.root)
    vecinos = list()
    distancia_maxima = 10000  # sentinel: larger than any expected distance
    while len(nodes) != 0:
        node = nodes.pop()
        distancia = distance(vector, node.vector)
        if distancia < distancia_maxima and node.correlative[0].id != id:
            if len(vecinos) < cantidad_vecinos:
                vecinos.append((distancia, node.correlative))
            else:
                # Replace the current farthest neighbour (last after sorting).
                vecinos.pop(-1)
                vecinos.append((distancia, node.correlative))
            vecinos = sorted(vecinos, key=lambda x: x[0])
            # Only tighten the bound once the candidate list is full;
            # doing it earlier would wrongly skip valid neighbours.
            if len(vecinos) == cantidad_vecinos:
                distancia_maxima = vecinos[-1][0]
        # Note: both subtrees are always visited; the cut-dimension pruning
        # described in the docstring is not applied here.
        if node.left_node is not None:
            nodes.append(node.left_node)
        if node.right_node is not None:
            nodes.append(node.right_node)
    return vecinos
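# Hypothetical usage of k_nearest: ask the tree for the three vectors nearest
# to a query, skipping the record whose id equals the query's own id.
# neighbours = tree.k_nearest(3, [0.2, 0.7, 0.1], id=42)
# -> [(distancia, correlative), ...] sorted by ascending distance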
def generalize_locations(self, locations, subs_mvmt_amt=32):
    """
    Takes a list of tuples and removes entries that show little movement.

    Args:
        locations (list): A list of tuples of type (timestamp, list)
        [subs_mvmt_amt] (int): How many pixels of movement are considered
            "substantial"

    Returns:
        List of tuples with insubstantial movements culled
    """
    ret = []
    last_location = None
    for timestamp, location in locations:
        # Make sure we aren't dealing with accidental NaN values
        if contains_nan(location):
            continue
        # Work from the centre: we don't care about the hitbox as a whole
        center_x = int(location[0] + location[2]) // 2
        center_y = int(location[1] + location[3]) // 2
        if last_location is None:
            ret.append((timestamp, location))
            last_location = (center_x, center_y)
        else:
            # Keep the sample only if it moved more than the substantial
            # amount from the last kept sample
            if (
                distance(center_x, center_y, last_location[0], last_location[1])
                > subs_mvmt_amt
            ):
                ret.append((timestamp, location))
                last_location = (center_x, center_y)
    # Ensure the result has at least two points when the input did
    if len(ret) < 2:
        if len(locations) >= 2:
            ret.append(locations[len(locations) - 1])
        else:
            ret = []
    return ret
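# Hypothetical usage of generalize_locations: three samples where the middle
# one moves only a couple of pixels and is culled (timestamps and boxes are
# illustrative):
# samples = [
#     (0.0, [100, 100, 40, 40]),
#     (0.5, [104, 102, 40, 40]),  # centre moved ~ 2 px: dropped
#     (1.0, [200, 180, 40, 40]),  # centre moved ~ 64 px: kept
# ]
# culled = tracker.generalize_locations(samples)  # -> first and third samples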
def closeEnough(self, p1, p2):
    # d = distance(p1 - p2)
    # m = (p1 - p2).manhattanLength()
    # print "d %.2f, m %d, %.2f" % (d, m, d - m)
    return distance(p1 - p2) < self.epsilon
def __call__(self, context):
    objects = context['objects']
    context['exit_masks'] = self.exit_masks
    context['pathes'] = self.pathes
    context['vehicle_count'] = self.vehicle_count
    if not objects:
        return context

    points = np.array(objects)[:, 0:2]
    points = points.tolist()

    # Match each new detection to an existing path, predicting the next
    # position by linear extrapolation when the path has history.
    if not self.pathes:
        for match in points:
            self.pathes.append([match])
    else:
        new_pathes = []
        for path in self.pathes:
            _min = 999999
            _match = None
            for p in points:
                if len(path) == 1:
                    d = utils.distance(p[0], path[-1][0])
                else:
                    # Constant-velocity prediction of the next position
                    xn = 2 * path[-1][0][0] - path[-2][0][0]
                    yn = 2 * path[-1][0][1] - path[-2][0][1]
                    d = utils.distance(p[0], (xn, yn),
                                       x_weight=self.x_weight,
                                       y_weight=self.y_weight)
                if d < _min:
                    _min = d
                    _match = p
            if _match and _min <= self.max_dst:
                points.remove(_match)
                path.append(_match)
                new_pathes.append(path)
            if _match is None:
                new_pathes.append(path)
        self.pathes = new_pathes

    # Unmatched detections outside the exit zones start new paths
    if len(points):
        for p in points:
            if self.check_exit(p[1]):
                continue
            self.pathes.append([p])

    # Keep only the most recent path_size points of each path
    for i, _ in enumerate(self.pathes):
        self.pathes[i] = self.pathes[i][-self.path_size:]

    new_pathes = []
    for i, path in enumerate(self.pathes):
        d = path[-2:]
        # A vehicle is counted when its path crosses into an exit zone
        if (len(d) >= 2 and not self.check_exit(d[0][1])
                and self.check_exit(d[1][1])
                and self.path_size <= len(path)):
            self.vehicle_count += 1  # a47
            vehicle_count[0] = self.vehicle_count
            img = context['frame']
            contour, centroid = path[-1][:2]
            x, y, w, h = contour
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            cv2.imwrite(
                current_path + "/detected_vehicles/vehicle" +
                str(vehicle_count[0]) + ".png",
                img[y:y + h - 1, x:x + w])

            # HEURISTIC SPEED PREDICTION
            for i, path in enumerate(context['pathes']):
                path = np.array(path)[:, 1].tolist()
                if path[-1][1] - path[-2][1] < 0:
                    # y decreasing: the vehicle is moving upward
                    speed = abs(path[-1][1] - path[-2][1]) * 10
                    if speed == 0:
                        print("could not be predicted")
                        speed_cache[0] = "n.a."
                        direction_cache[0] = "n.a."
                    else:
                        if path[-1][0] > 250:
                            speed = speed * 3 / 4
                            speed_cache[0] = speed
                            direction_cache[0] = "upward"
                        if speed < 100:
                            print(" SPEED (upward): " + str(speed))
                            speed_cache[0] = speed
                            direction_cache[0] = "upward"
                        else:
                            speed = speed / 5 * 3
                            if speed > 100:
                                speed = speed / 5 * 3
                                speed_cache[0] = speed
                                direction_cache[0] = "upward"
                                print(" SPEED (upward): " + str(speed))
                            else:
                                print(" SPEED (upward): " + str(speed))
                                speed_cache[0] = speed
                                direction_cache[0] = "upward"
                else:
                    # y increasing (or unchanged): moving downward
                    speed = abs(path[-1][1] - path[-2][1])
                    if speed < 3:
                        speed = speed * 24
                    else:
                        speed = speed * 12
                    if speed == 0:
                        print("could not be predicted")
                        speed_cache[0] = "n.a."
                        direction_cache[0] = "downward"
                    elif speed < 100:
                        print("SPEED (downward): " + str(speed))
                        speed_cache[0] = speed
                        direction_cache[0] = "downward"
                    else:
                        speed = speed / 5 * 3
                        if speed > 100:
                            speed = speed / 5 * 3
                            speed_cache[0] = speed
                            direction_cache[0] = "downward"
                            print("SPEED (downward): " + str(speed))
                        else:
                            print("SPEED (downward): " + str(speed))
                            speed_cache[0] = speed
                            direction_cache[0] = "downward"

                for point in path:
                    cv2.circle(img, point, 2, CAR_COLOURS[0], -1)  # a47
                cv2.polylines(img, [np.int32(path)], False,
                              CAR_COLOURS[0], 1)
                time_stamp = str(datetime.datetime.now())
                direction = str(direction_cache[0])
                speed = str(speed_cache[0])
                csv_line = time_stamp + "," + direction + "," + speed
                with open('traffic_measurement.csv', 'a') as f:
                    writer = csv.writer(f)
                    writer.writerows([csv_line.split(',')])
            # HEURISTIC SPEED PREDICTION (end)
        else:
            # Drop paths that have already reached an exit zone
            add = True
            for p in path:
                if self.check_exit(p[1]):
                    add = False
                    break
            if add:
                new_pathes.append(path)

    self.pathes = new_pathes
    context['pathes'] = self.pathes
    context['objects'] = objects
    context['vehicle_count'] = self.vehicle_count
    return context
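# A standalone sketch of the constant-velocity prediction used by the matcher
# above (helper name hypothetical): the next position is extrapolated as
# last + (last - previous), and detections are scored against it.
def predict_next(path):
    # Each path entry is a match; index [0] holds the coordinate pair that
    # utils.distance compares above.
    (x1, y1), (x2, y2) = path[-2][0][:2], path[-1][0][:2]
    return (2 * x2 - x1, 2 * y2 - y1)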
def nearestVertex(self, point, epsilon):
    for i, p in enumerate(self.points):
        if distance(p - point) <= epsilon:
            return i
    return None
def main(cap_queue):
    yolo = YOLO()
    sort = Sort(iou_threshold=0.05)
    whitelist = [
        "person", "bicycle", "car", "motorbike", "aeroplane", "bus",
        "truck", "boat", "fire hydrant", "stop sign", "parking meter",
        "bird", "cat", "dog", "backpack", "umbrella", "handbag", "suitcase",
    ]
    tally = 0
    while tally < 1332:  # fixed number of frames to process
        frame = cap_queue.get(block=True)
        if isinstance(frame, int) and frame == -1:
            return
        outputs = yolo.get_results(frame)
        detections = []
        labels = []
        for output in outputs:
            label = yolo.classes[int(output[-1])]
            if label not in whitelist:
                del label
                continue
            tl = tuple(output[1:3].int())
            br = tuple(output[3:5].int())
            detections.append(
                np.array(
                    [tl[0].item(), tl[1].item(), br[0].item(), br[1].item()]))
            labels.append(label)
            del tl, br, label
        del outputs
        detections = np.array(detections)
        if detections.shape[0] > 0:
            try:
                alive, _ = sort.update(detections, labels)
            except IndexError:
                del frame, detections, labels
                continue
            for trk in alive:
                t = trk.get_state()[0]
                try:
                    bbox = [int(t[0]), int(t[1]), int(t[2]), int(t[3])]
                except ValueError:
                    continue
                cv2.rectangle(
                    frame,
                    (int(bbox[0]), int(bbox[1])),
                    (int(bbox[2]), int(bbox[3])),
                    trk.color,
                    2,
                )
                cv2.putText(
                    frame,
                    "{}:id {}".format(trk.get_label(), str(trk.id)),
                    (int(bbox[0]), int(bbox[1]) - 12),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.0005 * frame.shape[0],
                    trk.color,
                    2,
                )
                # Trail of bottom-centre points for this track
                for location in trk.locations:
                    x1, y1, x2, y2 = location[1]
                    cv2.circle(frame, ((int(x1) + int(x2)) // 2, int(y2)),
                               3, trk.color, -1)
                    del x1, y1, x2, y2
                if len(alive) > 1:
                    # Draw pairwise distances between tracks
                    for trk2 in alive:
                        if trk == trk2:
                            continue
                        t2 = trk2.get_state()[0]
                        try:
                            bbox2 = [
                                int(t2[0]), int(t2[1]), int(t2[2]), int(t2[3])
                            ]
                        except ValueError:
                            continue
                        d = distance(bbox, bbox2)
                        color = (255, 255, 255) if d > 50 else (0, 255, 255)
                        cv2.line(
                            frame,
                            ((bbox[0] + bbox[2]) // 2, int(bbox[3])),
                            ((bbox2[0] + bbox2[2]) // 2, int(bbox2[3])),
                            color,
                            2,
                        )
                        cv2.putText(
                            frame,
                            str(d),
                            midpoint(bbox, bbox2),
                            cv2.FONT_HERSHEY_SIMPLEX,
                            0.0005 * frame.shape[0],
                            color,
                            2,
                        )
                        del d
                del t, bbox
        cv2.imshow("Object Tracking", frame)
        cv2.imwrite("./tracked/{}.png".format(tally), frame)
        tally += 1
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            raise KeyboardInterrupt
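# The distancing overlay above assumes `distance` and `midpoint` helpers that
# compare two [x1, y1, x2, y2] boxes via their bottom-centre points (matching
# the line endpoints drawn above). A minimal sketch of that assumption; the
# `bottom_center` helper name is hypothetical:
import math


def bottom_center(bbox):
    return ((bbox[0] + bbox[2]) // 2, bbox[3])


def distance(a, b):
    (ax, ay), (bx, by) = bottom_center(a), bottom_center(b)
    return int(math.hypot(ax - bx, ay - by))


def midpoint(a, b):
    (ax, ay), (bx, by) = bottom_center(a), bottom_center(b)
    return ((ax + bx) // 2, (ay + by) // 2)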