Code Example #1
    def testS2LatLong(self):
        point = Point.FromLatLng(30, 40)
        self.assertPointApproxEq(Point(0.663413948169, 0.556670399226, 0.5),
                                 point)
        (lat, lng) = point.ToLatLng()
        self.assertApproxEq(30, lat)
        self.assertApproxEq(40, lng)
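
The expected vector above is just the standard latitude/longitude to unit-sphere conversion. A minimal sketch (not the library's implementation) that reproduces the asserted constants:

import math

def lat_lng_to_unit_vector(lat_deg, lng_deg):
    # Spherical-to-Cartesian conversion onto the unit sphere.
    lat, lng = math.radians(lat_deg), math.radians(lng_deg)
    return (math.cos(lat) * math.cos(lng),
            math.cos(lat) * math.sin(lng),
            math.sin(lat))

print(lat_lng_to_unit_vector(30, 40))
# -> approx. (0.663413948169, 0.556670399226, 0.5), the vector asserted above
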
Code Example #2
    def testCutAtClosestPoint(self):
        poly = Poly()
        poly.AddPoint(Point(0, 1, 0).Normalize())
        poly.AddPoint(Point(0, 0.5, 0.5).Normalize())
        poly.AddPoint(Point(0, 0, 1).Normalize())

        (before,
         after) = poly.CutAtClosestPoint(Point(0, 0.3, 0.7).Normalize())

        self.assert_(2 == before.GetNumPoints())
        self.assert_(2 == after.GetNumPoints())
        self.assertPointApproxEq(Point(0, 0.707106781187, 0.707106781187),
                                 before.GetPoint(1))

        self.assertPointApproxEq(Point(0, 0.393919298579, 0.919145030018),
                                 after.GetPoint(0))

        poly = Poly()
        poly.AddPoint(Point.FromLatLng(40.527035999999995,
                                       -74.191265999999999))
        poly.AddPoint(Point.FromLatLng(40.526859999999999,
                                       -74.191140000000004))
        poly.AddPoint(Point.FromLatLng(40.524681000000001,
                                       -74.189579999999992))
        poly.AddPoint(Point.FromLatLng(40.523128999999997,
                                       -74.188467000000003))
        poly.AddPoint(Point.FromLatLng(40.523054999999999,
                                       -74.188676000000001))
        pattern = Poly()
        pattern.AddPoint(Point.FromLatLng(40.52713, -74.191146000000003))
        self.assertApproxEq(14.564268281551, pattern.GreedyPolyMatchDist(poly))
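
The constants asserted above follow directly from vector normalization: before keeps the first two vertices, and after starts at the cut point, which here is simply the normalized query point because it already lies on the arc between the second and third vertices. A quick plain-Python check (assuming Point.Normalize scales the vector to unit length):

import math

def normalize(x, y, z):
    # Assumed behaviour of Point.Normalize: scale the vector to unit length.
    n = math.sqrt(x * x + y * y + z * z)
    return (x / n, y / n, z / n)

print(normalize(0, 0.5, 0.5))  # -> approx. (0.0, 0.707106781187, 0.707106781187)
print(normalize(0, 0.3, 0.7))  # -> approx. (0.0, 0.393919298579, 0.919145030018)
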
Code Example #3
    def testPolyMatch(self):
        poly = Poly()
        poly.AddPoint(Point(0, 1, 0).Normalize())
        poly.AddPoint(Point(0, 0.5, 0.5).Normalize())
        poly.AddPoint(Point(0, 0, 1).Normalize())

        collection = PolyCollection()
        collection.AddPoly(poly)
        match = collection.FindMatchingPolys(Point(0, 1, 0), Point(0, 0, 1))
        self.assert_(len(match) == 1 and match[0] == poly)

        match = collection.FindMatchingPolys(Point(0, 1, 0), Point(0, 1, 0))
        self.assert_(len(match) == 0)

        poly = Poly()
        poly.AddPoint(Point.FromLatLng(45.585212, -122.586136))
        poly.AddPoint(Point.FromLatLng(45.586654, -122.587595))
        collection = PolyCollection()
        collection.AddPoly(poly)

        match = collection.FindMatchingPolys(
            Point.FromLatLng(45.585212, -122.586136),
            Point.FromLatLng(45.586654, -122.587595),
        )
        self.assert_(len(match) == 1 and match[0] == poly)

        match = collection.FindMatchingPolys(
            Point.FromLatLng(45.585219, -122.586136),
            Point.FromLatLng(45.586654, -122.587595),
        )
        self.assert_(len(match) == 1 and match[0] == poly)

        self.assertApproxEq(0.0, poly.GreedyPolyMatchDist(poly))

        match = collection.FindMatchingPolys(
            Point.FromLatLng(45.587212, -122.586136),
            Point.FromLatLng(45.586654, -122.587595),
        )
        self.assert_(len(match) == 0)
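
FindMatchingPolys itself is not shown, but the assertions imply it matches polylines whose endpoints lie close to the query points: shifting the start latitude by 0.000007 degrees (roughly a metre) still matches, while shifting it by 0.002 degrees (roughly 200 m) does not. A hypothetical endpoint-tolerance matcher in that spirit; the function name, the matching criterion, and the 100 m radius are assumptions, not the library's implementation:

def find_matching_polys_sketch(polys, start_point, end_point, max_radius_m=100):
    # Keep polylines whose first point is near start_point and whose last
    # point is near end_point (hypothetical tolerance of max_radius_m metres).
    matches = []
    for poly in polys:
        first = poly.GetPoint(0)
        last = poly.GetPoint(poly.GetNumPoints() - 1)
        if (first.GetDistanceMeters(start_point) <= max_radius_m and
                last.GetDistanceMeters(end_point) <= max_radius_m):
            matches.append(poly)
    return matches
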
Code Example #4
    def testGetDistanceMeters(self):
        point1 = Point.FromLatLng(40.536895, -74.203033)
        point2 = Point.FromLatLng(40.575239, -74.112825)
        self.assertApproxEq(8732.623770873237,
                            point1.GetDistanceMeters(point2))
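
The asserted 8732.6 m is consistent with an ordinary great-circle distance on a sphere of radius about 6371 km; the exact Earth-radius constant used by the library is an assumption here. A haversine sketch that lands close to the test value:

import math

EARTH_RADIUS_M = 6371010  # assumed mean Earth radius, not taken from the library

def great_circle_distance_m(lat1, lng1, lat2, lng2):
    # Haversine formula: central angle between the two points times the radius.
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dlat, dlng = p2 - p1, math.radians(lng2 - lng1)
    a = (math.sin(dlat / 2) ** 2 +
         math.cos(p1) * math.cos(p2) * math.sin(dlng / 2) ** 2)
    return 2 * EARTH_RADIUS_M * math.asin(math.sqrt(a))

print(great_circle_distance_m(40.536895, -74.203033, 40.575239, -74.112825))
# -> approx. 8732.6, close to the value asserted above
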
Code Example #5
# Imports inferred from this excerpt's usage (module paths and the sqlite3
# OperationalError are assumptions); helpers such as DbManager, ActiveTrips,
# TripState, read_feed, calculate_delay and _normalize_time, plus the custom
# exceptions, are defined elsewhere in the original script.
import logging
import time
from datetime import datetime
from sqlite3 import OperationalError

import pytz
import transitfeed
from transitfeed.shapelib import Point


def main(gtfs_zip_or_dir, feed_url, db_file, interval):
    loader = transitfeed.Loader(feed_path=gtfs_zip_or_dir, memory_db=False)
    schedule = loader.Load()
    agency = schedule.GetAgencyList()[0]
    global time_zone
    time_zone = pytz.timezone(agency.agency_timezone)

    db_manager = DbManager(db_file)

    if not schedule.GetShapeList():
        logging.error("This feed doesn't contain a shapes.txt file. Exiting...")
        return

    active_trips = ActiveTrips()

    logging.info("Start at local time {}".format(datetime.now()))
    while True:  # poll the GTFS-realtime feed once per polling interval
        cnt, all = 0, 0  # records saved vs. vehicle entities seen this cycle
        before = time.time()
        feed = read_feed(feed_url)
        for entity in feed.entity:
            if entity.HasField('vehicle'):
                trip_id = entity.vehicle.trip.trip_id
                try:
                    trip = schedule.GetTrip(trip_id)
                except KeyError as e:
                    logging.warning(
                        "Faulty trip_id for entity: {}".format(entity))
                    continue
                all += 1
                vehiclePoint = Point.FromLatLng(
                    entity.vehicle.position.latitude,
                    entity.vehicle.position.longitude)
                try:
                    trip_state = TripState(trip, vehiclePoint,
                                           entity.vehicle.stop_id)
                except VehicleOutOfPolylineException as e:
                    logging.warning(
                        "Vehicle {1} is off the shape for trip_id {0}".format(
                            trip_id, (entity.vehicle.position.latitude,
                                      entity.vehicle.position.longitude)))
                    continue
                except StopFarFromPolylineException as e:
                    logging.warning(
                        "Couldn't reach all stops for trip_id {}".format(
                            trip_id))
                    continue

                cur_trip_progress = active_trips.get_trip_progress(trip_id)
                new_progress = trip_state.get_trip_progress()
                if trip_state.get_distance_to_end_stop(
                ) < 100 and cur_trip_progress == new_progress:
                    continue
                if cur_trip_progress is not None and new_progress < cur_trip_progress:
                    logging.warning(
                        "The trip_id {} seems to go backwards. Timestamp {}".
                        format(trip_id, entity.vehicle.timestamp))
                    continue
                if not active_trips.is_trip_active(
                        trip_id) and trip_state.get_prev_stop_seq() > 2:
                    continue

                prev_timestamp = active_trips.get_timestamp_for_trip(trip_id)
                if active_trips.is_trip_active(trip_id):
                    speed = trip_state.get_avrg_speed(
                        entity.vehicle.timestamp - prev_timestamp,
                        new_progress - cur_trip_progress)
                    if speed > 120:  # sanity check against implausible jumps
                        logging.warning(
                            "Trip {} is trying to advance too quickly -> {} km/h, timestamp {}"
                            .format(trip_id, speed, entity.vehicle.timestamp))
                        continue

                if entity.vehicle.timestamp != prev_timestamp:
                    cnt += 1
                    estimated_time = trip_state.get_estimated_scheduled_time()
                    stop_progress = trip_state.get_stop_progress()
                    delay = calculate_delay(
                        _normalize_time(entity.vehicle.timestamp),
                        estimated_time)
                    active_trips.add_update_trip(trip_id,
                                                 entity.vehicle.timestamp,
                                                 new_progress)
                    start_day = active_trips.get_day_for_trip(trip_id)
                    db_manager.insert_log(entity.vehicle.trip.route_id,
                                          trip_id,
                                          trip_state.get_prev_stop_seq(),
                                          entity.vehicle.timestamp, start_day,
                                          delay, new_progress, stop_progress)

        try:
            db_manager.commit()
        except OperationalError as e:
            logging.warning("Hard drive overload")
            continue

        active_trips.clean_inactive_trips(feed.header.timestamp)
        proc_time = time.time() - before
        logging.info("Procesing time {}. Saved {} out of {} records".format(
            proc_time, cnt, all))
        if interval - proc_time > 0:
            time.sleep(interval - proc_time)
        else:
            logging.warning("Processing is taking too long")