Code example #1
File: ecobici_ba.py Project: ustroetz/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()
        data = scraper.request(self.feed_url)
        tree = etree.XML(data.encode('utf-8'))

        namespaces = {
            'Bicicletas': 'http://bicis.buenosaires.gob.ar/ServiceBicycle.asmx'
        }

        stations_XML = tree.xpath('//Bicicletas:Estacion', namespaces=namespaces)

        stations = []
        for station_XML in stations_XML:
            station           = BikeShareStation()
            uid               = station_XML.find('Bicicletas:EstacionId', namespaces=namespaces).text
            address           = station_XML.find('Bicicletas:Lugar', namespaces=namespaces).text + ' ' + station_XML.find('{http://bicis.buenosaires.gob.ar/ServiceBicycle.asmx}Numero').text

            station.name      = station_XML.find('Bicicletas:EstacionNombre', namespaces=namespaces).text
            station.latitude  = station_XML.find('Bicicletas:Latitud', namespaces=namespaces).text
            station.longitude = station_XML.find('Bicicletas:Longitud', namespaces=namespaces).text
            station.bikes     = int(station_XML.find('Bicicletas:AnclajesTotales', namespaces=namespaces).text)
            station.free      = int(station_XML.find('Bicicletas:BicicletaDisponibles', namespaces=namespaces).text)

            station.extra = {
                'uid': uid,
                'address': address
            }

            if station.latitude and station.longitude:
                station.latitude = float(station.latitude)
                station.longitude = float(station.longitude)
                stations.append(station)

        self.stations = stations
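
For context, a minimal usage sketch of the pattern the update() snippets in this listing share: instantiate a system, call update(), then read .stations. The tag passed to pybikes.get is hypothetical, and some systems (like this one, per the authorize variant later in the listing) also take an API key:

import pybikes

bike_share = pybikes.get('ecobici-ba')  # hypothetical tag
bike_share.update()                     # fetches the feed and fills .stations
for station in bike_share.stations:
    print(station.name, station.bikes, station.free)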
Code example #2
File: gewista_citybike.py Project: R0nd/pybikes
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper()
     data = scraper.request(self.endpoint)
     tree = etree.fromstring(data.encode('utf-8'))
     markers = tree.xpath('//station')
     self.stations = map(GewistaStation, markers)
Code example #3
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper()
     data = scraper.request(self.endpoint)
     tree = etree.fromstring(data.encode('utf-8'))
     markers = tree.xpath('//station')
     self.stations = list(map(GewistaStation, markers))
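
The only change from example #2 is the list(...) around map. On Python 3, map returns a lazy iterator, so the unwrapped form (which several snippets below still use) would make self.stations single-use:

markers = ['a', 'b']
stations = map(str.upper, markers)
assert list(stations) == ['A', 'B']
assert list(stations) == []  # Python 3: the iterator is already exhausted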
Code example #4
File: nextbike.py Project: njoylab/pybikes
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper(cache)
     domain_xml = etree.fromstring(
         scraper.request(self.url).encode('utf-8'))
     places = domain_xml.xpath(CITY_QUERY.format(uid=self.uid))
     self.stations = filter(None, map(NextbikeStation, places))
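
filter(None, ...) drops falsy results, so a factory that returns None for an unwanted place is skipped silently. A minimal illustration with a hypothetical stand-in for NextbikeStation (on Python 3 this, too, would need a list(...) wrapper):

def maybe_station(place):
    # hypothetical stand-in: keep even numbers only
    return place if place % 2 == 0 else None

stations = list(filter(None, map(maybe_station, [1, 2, 3, 4])))
assert stations == [2, 4]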
Code example #5
File: decobike.py Project: BenSto/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()
        raw = scraper.request(self.feed_url)
        tree = etree.fromstring(raw)
        stations = []
        for location in tree.xpath('//location'):
            station = BikeShareStation()
            uid     = location.find('Id').text
            address = location.find('Address').text

            station.name      = "%s - %s" % (uid, address)
            station.latitude  = float(location.find('Latitude').text)
            station.longitude = float(location.find('Longitude').text)
            station.bikes     = int(location.find('Bikes').text)
            station.free      = int(location.find('Dockings').text)

            station.extra = {
                'uid': uid,
                'address': address
            }

            stations.append(station)

        self.stations = stations
Code example #6
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper(cache)
     domain_xml = etree.fromstring(
         scraper.request(self.url).encode('utf-8'))
     places = domain_xml.xpath(CITY_QUERY.format(uid=self.uid))
     self.stations = map(NextbikeStation, self.filter_stations(places))
Code example #7
File: pegbike.py Project: usepedal/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()

        body = json.dumps({"tipo": "getstations"})
        json_data = scraper.request(
                self.feed_url,
                method='POST',
                data=body,
                headers={"Content-Type": "application/json"}
        )

        station_data = json.loads(json_data)
        stations = []
        for data in station_data:
            station = BikeShareStation()
            station.name = data['nome']
            station.latitude = float(data['latitude'])
            station.longitude = float(data['longitude'])
            station.bikes = int(data['bikes'])
            station.free = int(data['vagas'])
            station.extra = {
                'address': data['endereco'],
                'uid': int(data['id']),
                'online': data['status'] == u'Em operação'
            }
            stations.append(station)
        self.stations = stations
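
For reference, the same POST issued directly with the requests library; the URL is a placeholder, since self.feed_url is not shown in the listing:

import json
import requests

feed_url = 'https://example.com/ws'  # placeholder for self.feed_url
resp = requests.post(
    feed_url,
    data=json.dumps({'tipo': 'getstations'}),
    headers={'Content-Type': 'application/json'},
)
station_data = resp.json()  # same payload the snippet parses with json.loads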
Code example #8
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()
        raw = scraper.request(self.feed_url, 'GET', None, None, True)
        tree = etree.fromstring(raw)
        stationList = tree.xpath('/RESPUESTA/LISTA/DETALLE')

        self.stations = map(BilbonBiziStation, stationList)
Code example #9
File: nextbike.py Project: bcaller/pybikes
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper(cache)
     domain_xml = etree.fromstring(
         scraper.request(self.url).encode('utf-8')
     )
     places = domain_xml.xpath(
         '/markers/country/city[@uid="{uid}"]/place'.format(uid=self.uid)
     )
     # We want to raise an error if a uid is invalid, right?
     assert places, "Not found: uid {!r}, domain {!r}, url {}".format(
         self.uid, self.domain, self.url
     )
     self.stations = map(NextbikeStation, self.filter_stations(places))
Code example #10
File: ecobici_ba.py Project: beneeng/pybikes3
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()
        data = scraper.request(self.feed_url)
        tree = etree.XML(data.encode('utf-8'))

        stations_xml = tree.xpath('//b:Estacion', namespaces=NS)
        stations = []
        for station_xml in stations_xml:
            try:
                station = EcobiciBAStation(station_xml)
            except InvalidStation:
                continue
            stations.append(station)
        self.stations = stations
Code example #11
File: gbfs.py Project: usepedal/pybikes
    def update(self, scraper=None):
        scraper = scraper or PyBikesScraper()

        feeds = self.get_feeds(self.feed_url, scraper, self.force_https)

        # Station Information and Station Status data retrieval
        station_information = json.loads(
            scraper.request(feeds['station_information']))['data']['stations']
        station_status = json.loads(scraper.request(
            feeds['station_status']))['data']['stations']
        # Aggregate status and information by uid
        # Note there's no guarantee that station_status has the same
        # station_ids as station_information.
        station_information = {s['station_id']: s for s in station_information}
        station_status = {s['station_id']: s for s in station_status}
        # Any station not in station_information will be ignored
        stations = [(station_information[uid], station_status[uid])
                    for uid in station_information.keys()]
        self.stations = []
        for info, status in stations:
            info.update(status)
            try:
                station = GbfsStation(info)
            except exceptions.StationPlannedException:
                continue
            self.stations.append(station)
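
One caveat in the join above: the comprehension iterates over station_information, so a uid present there but missing from station_status raises a KeyError (only the reverse case is ignored, as the comment notes). A defensive variant, as a sketch, intersects the key sets first:

common_uids = station_information.keys() & station_status.keys()
stations = [(station_information[uid], station_status[uid])
            for uid in common_uids]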
Code example #12
File: bicicard.py Project: R0nd/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()

        status_fuzzle = scraper.request(self.status_url)

        location_dom  = etree.fromstring(self.kml_file)
        status_dom    = html.fromstring(status_fuzzle)

        placemarks = location_dom.xpath("//kml:Placemark",
                                        namespaces = _kml_ns)
        stations = []
        for placemark in placemarks:
            name = placemark.findtext('kml:name', namespaces = _kml_ns)
            name_id = placemark.findtext('kml:description',
                                      namespaces = _kml_ns)
            coor = map(
                float, placemark.findtext('.//kml:coordinates',
                                          namespaces = _kml_ns).
                       split(',')[0:2]
            )

            # Find a status table with the name_id of this station, XPath
            # performance on this query is not really costly so far.
            try:
                (status,) = status_dom.xpath(_xpath_q % name_id)
            except ValueError:
                # Not found.. move along?
                continue

            m = re.search(_re_bikes_slots, status)
            bikes = int(m.group('bikes'))
            slots = int(m.group('slots'))

            station = BikeShareStation()
            station.name       = name
            station.latitude   = coor[1]
            station.longitude  = coor[0]
            station.bikes      = bikes
            station.free       = slots - bikes
            station.extra      = { 'slots': slots }

            stations.append(station)

        self.stations = stations
Code example #13
File: youbike.py Project: usepedal/pybikes
 def update(self, scraper=None):
     scraper = scraper or PyBikesScraper(cache)
     html = scraper.request(self.main_url)
     data_m = re.search(r'siteContent=\'({.+?})\';', html)
     data = json.loads(data_m.group(1))
     filtered_data = filter_bounds(
         data.itervalues(), lambda s: (float(s['lat']), float(s['lng'])),
         *self.city_bounds)
     self.stations = list(map(YouBikeStation, filtered_data))
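
Note that data.itervalues() exists only on Python 2; a Python 3 port would use data.values() here, consistent with the list(map(...)) wrapper already present on the last line.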
Code example #14
File: bicicard.py Project: beneeng/pybikes3
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()

        status_fuzzle = scraper.request(self.status_url)

        location_dom = etree.fromstring(self.kml_file)
        status_dom = html.fromstring(status_fuzzle)

        placemarks = location_dom.xpath("//kml:Placemark", namespaces=_kml_ns)
        stations = []
        for placemark in placemarks:
            name = placemark.findtext('kml:name', namespaces=_kml_ns)
            name_id = placemark.findtext('kml:description', namespaces=_kml_ns)
            coor = list(
                map(
                    float,
                    placemark.findtext('.//kml:coordinates',
                                       namespaces=_kml_ns).split(',')[0:2]))

            # Find a status table with the name_id of this station, XPath
            # performance on this query is not really costly so far.
            try:
                (status, ) = status_dom.xpath(_xpath_q % name_id)
            except ValueError:
                # Not found.. move along?
                continue

            m = re.search(_re_bikes_slots, status)
            bikes = int(m.group('bikes'))
            slots = int(m.group('slots'))

            station = BikeShareStation()
            station.name = name
            station.latitude = coor[1]
            station.longitude = coor[0]
            station.bikes = bikes
            station.free = slots - bikes
            station.extra = {'slots': slots}

            stations.append(station)

        self.stations = stations
Code example #15
File: nextbike.py Project: usepedal/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper(cache)
        domain_xml = etree.fromstring(
            scraper.request(self.url).encode('utf-8'))
        places = domain_xml.xpath(
            '/markers/country/city[@uid="{uid}"]/place'.format(uid=self.uid))
        # We want to raise an error if a uid is invalid, right?
        assert places, "Not found: uid {!r}, domain {!r}, url {}".format(
            self.uid, self.domain, self.url)
        if self.bbox:

            def getter(place):
                lat, lng = place.attrib['lat'], place.attrib['lng']
                return (float(lat), float(lng))

            places = filter_bounds(places, getter, self.bbox)
        # For now ignore bikes roaming around
        places = filter(lambda p: p.attrib.get('bike', '') != '1', places)

        self.stations = list(map(NextbikeStation, places))
Code example #16
File: socialbicycles.py Project: eskerda/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper()

        scraper.headers.update(self.headers)

        page = 1
        places = []
        while page:
            resp = scraper.request(
                BASE_URL.format(uid=self.uid, page=page) + str(self.page_size)
            )
            data = json.loads(resp)
            if page * self.page_size > data['total_entries']:
                page = 0
            else:
                page += 1

            places.extend(data['items'])

        self.stations = map(SocialBicyclesStation, places)
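
The loop pages through the API until page * page_size exceeds total_entries, using page = 0 as a falsy sentinel to end the while. A self-contained trace of that logic, with assumed values:

page_size, total_entries = 10, 25  # assumed values, for illustration only
page, fetched = 1, []
while page:
    fetched.append(page)  # stands in for the scraper.request(...) call
    if page * page_size > total_entries:
        page = 0          # falsy: the while loop ends
    else:
        page += 1
assert fetched == [1, 2, 3]

Note the strict comparison: when total_entries is an exact multiple of page_size, one extra (empty) page is requested before the loop stops. The final assignment is also a bare map, which on Python 3 would leave self.stations as a one-shot iterator (see the note after example #3).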
Code example #17
File: nextbike.py Project: eskerda/pybikes
    def update(self, scraper=None):
        if scraper is None:
            scraper = PyBikesScraper(cache)
        domain_xml = etree.fromstring(
            scraper.request(self.url).encode('utf-8')
        )
        places = domain_xml.xpath(
            '/markers/country/city[@uid="{uid}"]/place'.format(uid=self.uid)
        )
        # We want to raise an error if a uid is invalid, right?
        assert places, "Not found: uid {!r}, domain {!r}, url {}".format(
            self.uid, self.domain, self.url
        )
        if self.bbox:
            def getter(place):
                lat, lng = place.attrib['lat'], place.attrib['lng']
                return (float(lat), float(lng))
            places = filter_bounds(places, getter, self.bbox)
        # For now ignore bikes roaming around
        places = filter(lambda p: p.attrib.get('bike', '') != '1', places)

        self.stations = map(NextbikeStation, places)
Code example #18
File: nextgal.py Project: wassupben/pybikes
 def update(self, scraper=None):
     scraper = scraper or PyBikesScraper()
     scraper.setUserAgent('kSOAP/2.0')
     scraper.headers.update({
         'SOAPAction': 'http://aparcabicis.nextgal.es/GetEstaciones',
         'Content-Type': 'text/xml',
     })
     data = scraper.request(urljoin(self.url, PATH),
                            method='POST',
                            data=PAYLOAD)
     tree = etree.XML(data.encode('utf-8'))
     stations_xml = tree.xpath('//ab:EstacionAdditionalInformationDto',
                               namespaces=NS)
     self.stations = map(NextgalStation, stations_xml)
Code example #19
    def update(self, scraper=None):
        scraper = scraper or PyBikesScraper()
        scraper.headers.update(headers)

        api_info_data = scraper.request(API_INFO_URL)
        api_info_match = re.search(r'MobipalmaMapa\((.*})\);', api_info_data)
        if not api_info_match:
            raise Exception('Mobipalma API info not found on website')

        api_info = json.loads(api_info_match.group(1))
        scraper.headers.update({
            'Authorization': 'Bearer %s' % api_info['token_data']['token'],
        })
        stations = json.loads(scraper.request(API_STATIONS_URL))
        status = json.loads(scraper.request(API_STATUS_URL))

        stations = {s['id']: s for s in stations}
        status = {s['id']: s for s in status}

        stations = [
            (stations[uid], status[uid]) for uid in stations.keys()
        ]

        self.stations = list(self.parse_stations(stations))
Code example #20
File: ecobici_ba.py Project: usepedal/pybikes
 def update(self, scraper=None):
     # Patch default scraper request method
     scraper = scraper or PyBikesScraper()
     EcobiciBA.authorize(scraper, self.key)
     super(EcobiciBA, self).update(scraper)
Code example #21
File: domoblue.py Project: R0nd/pybikes
parser.add_argument('--proxy', metavar='host:port', dest='proxy',
                    default=None, help="Use host:port as a proxy for site calls")

parser.add_argument('-v', action="store_true", dest='verbose',
                    default=False, help="Verbose output for debugging (no progress)")

args = parser.parse_args()

outfile = args.outfile

proxies = {}

user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19'

scraper = PyBikesScraper()
scraper.setUserAgent(user_agent)

sysdef = {
    "system": "domoblue",
    "class": "Domoblue",
    "instances": []
}

if args.proxy is not None:
    proxies['http'] = args.proxy
    scraper.setProxies(proxies)
    scraper.enableProxy()


def get_token(client_id):
Code example #22
 def update(self, scraper=None):
     if scraper is None:
         scraper = PyBikesScraper(cache)
     self.stations = map(YouBikeStation, self.get_data(scraper))
Code example #23
parser.add_argument('-v',
                    action="store_true",
                    dest='verbose',
                    default=False,
                    help="Verbose output for debugging (no progress)")

args = parser.parse_args()

outfile = args.outfile

proxies = {}

user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.168 Safari/535.19'

scraper = PyBikesScraper()
scraper.setUserAgent(user_agent)

sysdef = {"system": "domoblue", "class": "Domoblue", "instances": []}

if args.proxy is not None:
    proxies['http'] = args.proxy
    scraper.setProxies(proxies)
    scraper.enableProxy()


def get_token(client_id):
    if 'Referer' in scraper.headers:
        del scraper.headers['Referer']
    url = MAIN + TOKEN_URL.format(service=client_id)
    data = scraper.request(url)