Example #1
    def test_connection_error(self):
        """Connecting to an unknown domain should raise a ConnectionError"""
        with pytest.raises(ConnectionError):
            requests.get("http://fooobarbangbazbing.httpbin.org")

        with pytest.raises(ConnectionError):
            requests.get("http://httpbin.org:1")
Example #2
    def test_status_raising(self):
        r = requests.get(httpbin("status", "404"))
        with pytest.raises(requests.exceptions.HTTPError):
            r.raise_for_status()

        r = requests.get(httpbin("status", "500"))
        assert not r.ok
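For comparison (added here, not part of the original test): outside a test suite the same check is usually a try/except around raise_for_status(); a minimal sketch, assuming httpbin.org is reachable:

import requests

response = requests.get("https://httpbin.org/status/404")
try:
    response.raise_for_status()               # raises HTTPError for 4xx/5xx responses
except requests.exceptions.HTTPError as exc:
    print("request failed:", exc)             # e.g. "404 Client Error: NOT FOUND for url: ..."
print(response.ok)                            # False for any status code >= 400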
Example #3
    def on_aaa_response(self, *args):
        message = args[0]

        if ('socketid' in message):
            self._redis_obj.publish('intercomm', message['socketid'])
            self._socketid = message['socketid']

        if ('name' in message):
            log('O', message['name'])
            self._socket_io.emit('send_message', self._executable)

        if ('data' in message):
            log('O', message['data'])
            self._redis_obj.publish('intercomm', '***end***')

        if ('picture' in message):
            log('D', message['picture'])
            file = requests.get(message['picture'])

            with open(self._imagepath + '/result' + str(self._socketid) + '.jpg', 'wb') as f:
                f.write(file.content)

            log('D', 'Image Saved: ' + self._imagepath + '/result' + str(self._socketid) + '.jpg')

        if ('mat' in message):
            log('D', message['mat'])
            file = requests.get(message['mat'])
            with open(self._imagepath + '/results' + self._socketid + '.txt', 'wb') as f:
                f.write(file.content)
            log('D', 'Results Saved: ' + self._imagepath + '/results' + self._socketid + '.txt')

        if ('request_data' in message):
            self._socket_io.emit('send_message', 'data')
Example #4
def extract_nearby_cities(item, country):
    res = requests.get(item['detail'])
    r = res.json()

    nearby_cities_url = r['properties']['products']['nearby-cities'][0]['contents']['nearby-cities.json']['url']
    res = requests.get(nearby_cities_url)
    r = res.json()

    out = []
    append = out.append

    j = 0
    for i in r:
        city = 'a ' + str(i['distance']) + ' km al '
        city += translate_string(i['direction']) + ' de ' + i['name']
        city = city.replace(', ' + country, '')
        append(city)
        j += 1
        if j == 3:
            break

    if len(out) > 1:
        out[-1] = 'y ' + out[-1]
    nearby_cities = ', '.join(out)
    nearby_cities = re.sub(', y', ' y', nearby_cities)
    return nearby_cities
Example #5
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            year = url['year']
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            r = requests.get(url, headers=h)
            r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            r = requests.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class':'movieplay'}).find('iframe')['src']
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            return sources
        except:
            print("Unexpected error in Furk Script: check_api", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #6
 def test_unicode_get(self):
     url = httpbin("/get")
     requests.get(url, params={"foo": "føø"})
     requests.get(url, params={"føø": "føø"})
     requests.get(url, params={"føø": "føø"})
     requests.get(url, params={"foo": "foo"})
     requests.get(httpbin("ø"), params={"foo": "foo"})
Example #7
    def test_where_3_way_logic(self):
        #             This column
        #                 ⇓
        #  ___________________
        # |     | col1 | col2 |
        #  -------------------
        # | r1  |  1   |      | ⇐ This row
        #  -------------------
        # | r2  |  1   |  2   |
        #  -------------------

        response = requests.get(self.dataset_url)
        datasets = json.loads(response.content)
        self.assertEqual(response.status_code, 200,
            msg="Couldn't get the list of datasets")
        self.assertEqual(datasets['status']['rowCount'], 2,
            msg="2 rows should be in the dataset. r1 and r2")

        params = {
            "select":'col1',
            "where":"'col2' < 2"
        }

        response = requests.get(self.dataset_url + '/query', params=params)
        content = json.loads(response.content)
        self.assertEqual(len(content), 0,
            msg="The query should have returned no results")
Example #8
    def test_basicauth_with_netrc(self):
        auth = ("user", "pass")
        wrong_auth = ("wronguser", "wrongpass")
        url = httpbin("basic-auth", "user", "pass")

        def get_netrc_auth_mock(url):
            return auth

        requests.sessions.get_netrc_auth = get_netrc_auth_mock

        # Should use netrc and work.
        r = requests.get(url)
        assert r.status_code == 200

        # Given auth should override and fail.
        r = requests.get(url, auth=wrong_auth)
        assert r.status_code == 401

        s = requests.session()

        # Should use netrc and work.
        r = s.get(url)
        assert r.status_code == 200

        # Given auth should override and fail.
        s.auth = wrong_auth
        r = s.get(url)
        assert r.status_code == 401
Example #9
 def test_connect_timeout(self):
     try:
         requests.get(TARPIT, timeout=(0.1, None))
         assert False, "The connect() request should time out."
     except ConnectTimeout as e:
         assert isinstance(e, ConnectionError)
         assert isinstance(e, Timeout)
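Added note on the timeout argument used above (not from the original suite): requests accepts either a single number or a (connect, read) tuple, and None leaves a phase unbounded, which is how the test limits only the connect phase. A minimal sketch, assuming httpbin.org is reachable:

import requests
from requests.exceptions import ConnectTimeout, ReadTimeout

# One number bounds both the connect and the read phase.
requests.get("https://httpbin.org/get", timeout=5)

# A (connect, read) tuple bounds the two phases independently.
try:
    requests.get("https://httpbin.org/delay/3", timeout=(3.05, 1))
except (ConnectTimeout, ReadTimeout) as exc:
    print("timed out:", exc)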
Example #10
def hopcam():
    # Should return a list with revmic's dailymotion videos entitled 'yesterday'
    yda_uri = 'https://api.dailymotion.com/videos?owners=revmic&' \
              'search=yesterday&fields=id,title,description,embed_url,' \
              'thumbnail_480_url,views_total'
    r = requests.get(yda_uri)
    print(r.json())

    try:
        # Get last item in list in case there are multiples (prev delete failed)
        yda_video = get_list(r)[-1]
    except IndexError as e:
        yda_video = {'title': "Sorry. Couldn't find yesterday's video :'("}
        print('IndexError - ', e)

    if 'Sorry' in yda_video['title']:  # Try again, rummage through all videos
        print("trying again")
        uri = 'https://api.dailymotion.com/videos?owners=revmic&' \
              'fields=id,title,description,embed_url,' \
              'thumbnail_480_url,views_total'
        videos = get_list(requests.get(uri))
        print(videos)

        for v in videos:
            if v['title'].lower() == 'yesterday':
                yda_video = v

    return render_template('hopcam.html', yesterday=yda_video)
Example #11
def get_opendata2_courses():
    good_courses = 0

    file_name = os.path.join(os.path.realpath(os.path.dirname(__file__)),
        '%s/opendata2_departments.json' % c.DEPARTMENTS_DATA_DIR)
    with open(file_name) as departments_file:
        departments = json.load(departments_file)

    # Create a text file for every department
    for d in departments:
        department = d['subject']
        open_data_json = requests.get(
                'https://api.uwaterloo.ca/v2/courses/{0}.json?key={1}'.format(
                department.upper(), s.OPEN_DATA_API_KEY)).json()
        open_data_catalog_numbers = []

        for course in open_data_json['data']:
            open_data_catalog_numbers.append(course['catalog_number'])

        # We now poll the individual endpoints of each course for the data
        current_dep_json = []
        course_url = 'https://api.uwaterloo.ca/v2/courses/{0}/{1}.json?key={2}'
        for course in open_data_catalog_numbers:
            good_courses += 1
            json_data = requests.get(course_url.format(department.upper(),
                    course, s.OPEN_DATA_API_KEY)).json()
            current_dep_json.append(json_data['data'])

        out_file_name = os.path.join(
                os.path.realpath(os.path.dirname(__file__)),
                'opendata2_courses/%s.json' % department.lower())
        with open(out_file_name, 'w') as courses_out:
            json.dump(current_dep_json, courses_out)

    print 'Found {num} good courses'.format(num=good_courses)
Example #12
    def get_speakers_ip(self, refresh=False):
        """ Get the IP addresses of all the Sonos speakers in the network.

        Code contributed by Thomas Bartvig ([email protected])

        Arguments:
        refresh -- Refresh the speakers IP cache.

        Returns:
        IP addresses of the Sonos speakers.

        """
        if self.speakers_ip and not refresh:
            return self.speakers_ip
        else:
            response = requests.get('http://' + self.speaker_ip +
                                    ':1400/status/topology')
            text = response.text
            grp = re.findall(r'(\d+\.\d+\.\d+\.\d+):1400', text)

            for i in grp:
                response = requests.get('http://' + i + ':1400/status')
                if response.status_code == 200:
                    self.speakers_ip.append(i)

            return self.speakers_ip
Example #13
def updateCCU(v):
  ccuUrl = "http://pi:8080/api/set"
  try:
    requests.get(ccuUrl + "/AussenTemp/?value=" + str(v.get('outside_temp')))
    requests.get(ccuUrl + "/KollectorTemp/?value=" + str(v.get('collector_temp')))
  except Exception,e:
    logError(e)
Example #14
def test_small_layer(svc_url, svc_data):
    """
    Test a service endpoint to see if the layer is small based on some simple rules.

    :param svc_url: The URL pointing to the feature endpoint
    :type svc_url: str
    :param svc_data: A dictionary containing scraped data from an ESRI feature service endpoint
    :type svc_data: dict
    :returns: bool -- True if the layer is considered 'small'
    """
# FIXME needs refactoring, better error handling and better logic
    global _proxies
    try:
        if svc_data['geometryType'] in ('esriGeometryPoint', 'esriGeometryMultipoint', 'esriGeometryEnvelope'):
            count_query = '/query?where=1%3D1&returnCountOnly=true&f=pjson'
            id_query = '/query?where=1%3D1&returnIdsOnly=true&f=json'
            r = requests.get(get_base_url(svc_url) + count_query, proxies=_proxies)
            if 'count' in r.json():
                return r.json()['count'] <= 2000
            r = requests.get(get_base_url(svc_url) + id_query, proxies=_proxies)
            if 'objectIds' in r.json():
                return len(r.json()['objectIds']) <= 2000
    except:
        pass
    return False
Example #15
  def _request_odl_data(self, host, node_connector_list, flow_statistics_list):

    # Data to export from OpenDaylight.
    data_dict = {}

    try:
        # Flow table statistics per host (eg. opendaylight, compute, control and neutron)
        try:
            table_flow_statistics_url = "%s%s%s%s" % (self._odl_inventory_url,'/node/',self.hosts_dict[host],'/table/0/opendaylight-flow-table-statistics:flow-table-statistics')
            table_flow_statistics = requests.get(table_flow_statistics_url)
            table_flow_statistics.raise_for_status()
            data_dict["table_flow_statistics"] = table_flow_statistics
        except requests.exceptions.HTTPError as err:
            print "Can not retrieve flow table statistics:", err
        # Aggregate flow statistics per host (eg. opendaylight, compute, control and neutron)
        try:
            aggregate_flow_statistics_url = "%s%s%s%s" % (self._odl_inventory_url,'/node/',self.hosts_dict[host],'/table/0/aggregate-flow-statistics/')
            aggregate_flow_statistics = requests.get(aggregate_flow_statistics_url)
            aggregate_flow_statistics.raise_for_status()
            data_dict["aggregate_flow_statistics"] = aggregate_flow_statistics
        except requests.exceptions.HTTPError as err:
            pass
            #print "Can not retrieve aggregate flow statistics:", err

        # Individual flow statistics per host (eg. opendaylight, compute, control and neutron)
        data_dict["flow_statistics_list"] = flow_statistics_list

        # Port statistics per host (eg. opendaylight, compute, control and neutron)
        data_dict["node_connector_list"] = node_connector_list

        return data_dict

    except ConnectionError:
        print("Error fetching data from OpenDaylight.")
Example #16
def main():
    j = requests.get("https://www.citibikenyc.com/stations/json")
    
    for m in j.json()["stationBeanList"]:
        conn.setnx(m["stationName"], 0)

    availableBikes = {}
    while True:
        # get the citibike response from their API
        r = requests.get("https://www.citibikenyc.com/stations/json")
        for m in r.json()["stationBeanList"]:
            # for each station, initialise the store if necessary
            if m["stationName"] not in availableBikes:
                availableBikes[m["stationName"]] = m["availableBikes"]
                continue

            delta = m["availableBikes"] - availableBikes[m["stationName"]]
            # if the number of bikes has changed, emit a message
            if delta != 0:
                stdout.flush()
                conn.set(m["stationName"], delta)

            # update the store with the new number of available bikes for that station
            availableBikes[m["stationName"]] = m["availableBikes"]

        # set system sleep
        time.sleep(1)
Example #17
def query(query_term, folder_name, path):

    BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q=' + query_term + '&start=%d'

    BASE_PATH = os.path.join(path, folder_name.replace(' ', '_'))

    if not os.path.exists(BASE_PATH):
        os.makedirs(BASE_PATH)
        print "made: " + BASE_PATH

    start = 0  # start query string parameter for pagination
    while start < 40:   # query 20 pages
        r = requests.get(BASE_URL % start)
        for image_info in json.loads(r.text)['responseData']['results']:
            url = image_info['unescapedUrl']
            try:
                image_r = requests.get(url)
            except ConnectionError, e:
                print 'could not download %s' % url
                continue

            #remove file-system path characters from name
            title = query_term.replace(' ', '_') + '_' + image_info['imageId']
            file = open(os.path.join(BASE_PATH, '%s.jpg') % title, 'w')
            try:
                Image.open(StringIO(image_r.content)).save(file, 'JPEG')
            except IOError, e:
                # throw away gifs and stuff
                print 'couldnt save %s' % url
                continue
            finally:
                file.close()  # assumed: close the image file (the original snippet is truncated here)
Example #18
def print_physiological_state(bb_thr, rr_thr):
    """ Retrive and print activity metrics of the specified node. """
    # Format the metric requests
    addr = 'http://127.0.0.1:8080'
    bb_request = ('/eegnode/metric/'
                  '{"type":"brainbeat",'
                  '"channels":["ch0", "ch1"],'
                  '"time_window":[15]}')

    rr_request = ('/ecgnode/metric/'
                  '{"type":"mean_hr",'
                  '"channels":["ch0"],'
                  '"time_window":[15],'
                  '"arguments":[100]}')

    # Perform requests
    bb = requests.get(addr + bb_request).json()[0]['return']
    rr = requests.get(addr + rr_request).json()[0]['return']

    if bb > bb_thr and rr > rr_thr:
        mwl_class = '1'
    else:
        mwl_class = '0'

    print('%0.2f\t' % bb + '%0.2f\t' % rr + mwl_class)
Example #19
def test_short_app_params_4():
    requests.get(url + '/__engine/1/functions/hello', headers={
        'x-lc-sign': sign_by_master_key
    })
    env = authorization.current_environ
    assert env['_app_params']['key'] is None
    assert env['_app_params']['master_key'] == TEST_MASTER_KEY
Example #20
    def get_posts_data(
        self, blog, id=None, get_comments=False, *args, **options
    ):
        if self.blog_to_migrate == "just_testing":
            with open('test-data-comments.json') as test_json:
                return json.load(test_json)

        self.url = blog
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        if self.username and self.password:
            auth = b64encode(
                str.encode('{}:{}'.format(self.username, self.password)))
            headers['Authorization'] = 'Basic {}'.format(auth)
        if self.url.startswith('http://'):
            base_url = self.url
        else:
            base_url = ''.join(('http://', self.url))
        posts_url = ''.join((base_url, '/wp-json/posts'))
        comments_url = ''.join((posts_url, '/%s/comments')) % id
        if get_comments is True:
            comments_url = ''.join((posts_url, '/%s/comments')) % id
            fetched_comments = requests.get(comments_url)
            comments_data = fetched_comments.text
            comments_data = self.clean_data(comments_data)
            return json.loads(comments_data)
        else:
            fetched_posts = requests.get(posts_url, headers=headers)
            data = fetched_posts.text
            data = self.clean_data(data)
            return json.loads(data)
Example #21
 def start_analyzer():
     flag = 1
     next_article = last_article
     counter = 0
     gaplimit = 500
     while flag == 1:
         gaplimit = 50
         # information_json = urllib.request.urlopen(root + api_get_url + next_article).read()
         # print(str(information_json)[1:])
         counter = counter + 1
         information = requests.get(root + api_get_url + str(gaplimit) + "&gapfrom=" + next_article).json()
         #print(information)
         try:
             while gaplimit>9 and information["query-continue"]["images"]["imcontinue"] != "":
                 print(information["query-continue"]["images"]["imcontinue"])
                 gaplimit -= 10
                 if gaplimit == 0:
                     gaplimit = 1
                 information = requests.get(root + api_get_url + str(gaplimit) + "&gapfrom=" + next_article).json()
                 #print(information)
         except:
             gaplimit = 500
         try:
             next_article = information["query-continue"]["allpages"]["gapcontinue"]
         except:
             flag = 0
         write_or = True
         for page in information["query"]["pages"]:
             if write_or:
                 print(information["query"]["pages"][page]["fullurl"])
                 write_or = False
             WebSurfer.check_page(information["query"]["pages"][page])
Example #22
def downloadXkcd(startComic, endComic):
    for urlNumber in range(startComic, endComic):
        # download the page
        print('Downloading page http://xkcd.com/%s...' % (urlNumber))
        res = requests.get('http://xkcd.com/%s' % (urlNumber))
        res.raise_for_status()

        soup = bs4.BeautifulSoup(res.text)

        # find the url of the comic image
        comicElem = soup.select('#comic img')
        if comicElem == []:
            print('Could not find comic image.')
        else:
            comicUrl = comicElem[0].get('src')
            # download the image
            print('Downloading image %s...' % (comicUrl))
            res = requests.get(comicUrl)
            res.raise_for_status()

            # save the image to ./xkcd
            imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb')
            for chunk in res.iter_content(1000000):
                imageFile.write(chunk)
            imageFile.close()
Example #23
def login_success(token, profile):
  if profile['email'] in allowed_users:
    return render_template('home.html')
    #return jsonify(token=token, profile=profile)
  else:
    requests.get('https://accounts.google.com/o/oauth2/revoke?token='+token['access_token'])
    return """
Example #24
    def run():
        responses.add(
            responses.GET, 'http://example.com/?test=1',
            match_querystring=True)

        with pytest.raises(ConnectionError):
            requests.get('http://example.com/foo/?test=2')
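The snippet above uses the responses mocking library; such registrations are normally wrapped in its @responses.activate decorator, which the excerpt omits. A minimal sketch of the same idea (an addition, not the original test; the test name is made up and match_querystring is left out):

import pytest
import requests
import responses
from requests.exceptions import ConnectionError

@responses.activate
def test_unregistered_url_raises():
    # Only this URL is registered with the mock...
    responses.add(responses.GET, 'http://example.com/')

    # ...so a request to any other URL is rejected with a ConnectionError.
    with pytest.raises(ConnectionError):
        requests.get('http://example.com/foo/')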
Example #25
def main():
    soup = BeautifulSoup(requests.get('https://news.ycombinator.com/news').content)
    links=soup.find_all('span', attrs={'class':'deadmark'})
    for link in links:
        webpage = link.next_sibling.get('href')
        content = BeautifulSoup(requests.get(webpage).content).body.get_text()
        printf((webpage, hls().get(content))) 
Example #26
    def deploy_test_app_and_check(self, app, test_uuid):
        """This method deploys the test server app and then
        pings its /operating_environment endpoint to retrieve the container
        user running the task.

        In a mesos container, this will be the marathon user.
        In a docker container, this user comes from the USER setting
            in the app's Dockerfile, which, for the test application,
            is the default: root.
        """
        if 'container' in app and app['container']['type'] == 'DOCKER':
            marathon_user = '******'
        else:
            marathon_user = app.get('user', self.default_os_user)
        with self.deploy_and_cleanup(app) as service_points:
            r = requests.get('http://{}:{}/test_uuid'.format(service_points[0].host, service_points[0].port))
            if r.status_code != 200:
                msg = "Test server replied with non-200 reply: '{0} {1}. "
                msg += "Detailed explanation of the problem: {2}"
                raise Exception(msg.format(r.status_code, r.reason, r.text))

            r_data = r.json()

            assert r_data['test_uuid'] == test_uuid

            r = requests.get('http://{}:{}/operating_environment'.format(
                service_points[0].host,
                service_points[0].port))

            if r.status_code != 200:
                msg = "Test server replied with non-200 reply: '{0} {1}. "
                msg += "Detailed explanation of the problem: {2}"
                raise Exception(msg.format(r.status_code, r.reason, r.text))

            assert r.json() == {'username': marathon_user}
Example #27
def do_api_request(api_link, method='GET', params={}):
    # add sendgrid user & api key
    params.update({
            'api_user': settings.get('sendgrid_user'),
            'api_key': settings.get('sendgrid_secret')
    })
    try:
        if method.upper() == 'GET':
            if len(params.keys()) > 0:
                r = requests.get(
                        api_link,
                        params=params,
                        verify=False
                )
            else:
                r = requests.get(
                        api_link,
                        verify=False
                )
        else:
            r = requests.post(
                    api_link,
                    params=params,
                    verify=False
            )
        response = r.json()
    except:
        response = {}
    if settings.get('environment') == "dev":
        logging.info("=================")
        logging.info( api_link)
        logging.info( json.dumps(params, indent=4))
        logging.info( response)
        logging.info( "=================")
    return response
Example #28
  def get_all_messages(self):
    print "Reading messages..."
    r = requests.get("https://api.groupme.com/v3/groups/"
        + self.gid + "/messages",
        params = {"token": self.key, "limit": 100})
    message_count = r.json()["response"]["count"]

    i = 0
    out = []

    while r.status_code == 200 and i < message_count:
      progress(i, message_count)
      resp = r.json()["response"]
      messages = resp["messages"]

      for message in messages:
        if message["system"] or message["text"] is None:
          continue
        if message["sender_type"] == u'bot':
          continue

        # ignore bot commands
        if message["text"].startswith("/bot"):
          continue
        out += [message]

      i += len(messages)

      last_id = messages[-1]["id"]
      r = requests.get("https://api.groupme.com/v3/groups/"
          + self.gid + "/messages",
          params = {"token": self.key, "limit": 100, "before_id": last_id})

    return out
Example #29
def search():    
    query = raw_input('Search: ').replace (" ", "+")
    url = base_url_search+snip+"&q="+query+"&type=video"+key
    content = json.loads(requests.get(url).text)

    stuff=[]
    stuff = gets_video_id(content)

    num=0

    channelTitle = content['items'][0]['snippet']['channelTitle'].capitalize() 
    num_results=float(int(content['pageInfo']['totalResults']))

    while content['nextPageToken'] and num<5:
        next_page=content['nextPageToken'].encode('UTF8')
        content=''
        url = base_url_search+snip+"&q="+query+"&type=video&pageToken="+next_page+key
        content = json.loads(requests.get(url).text)
        num+=1
    
        for videos in content['items']:
            if videos['id']['kind']=='youtube#video':
                vid_ids=videos['id']['videoId']
                stuff.append(vid_ids)
            
    stuff = [x.encode('UTF8') for x in stuff]
    chunks=[stuff[i:i+50] for i  in range(0, len(stuff), 50)]
    
    return chunks, stuff, channelTitle, num_results
Example #30
    def test_max_timeout(self):
        with SplashServer(extra_args=['--max-timeout=0.1']) as splash:
            r1 = requests.get(
                url=splash.url("render.html"),
                params={
                    'url': self.mockurl("delay?n=1"),
                    'timeout': '0.2',
                },
            )
            self.assertStatusCode(r1, 400)

            r2 = requests.get(
                url=splash.url("render.html"),
                params={
                    'url': self.mockurl("delay?n=1"),
                    'timeout': '0.1',
                },
            )
            self.assertStatusCode(r2, 504)

            r3 = requests.get(
                url=splash.url("render.html"),
                params={
                    'url': self.mockurl("delay?n=1")
                },
            )
            self.assertStatusCode(r3, 504)

            r4 = requests.get(
                url=splash.url("render.html"),
                params={
                    'url': self.mockurl("")
                },
            )
            self.assertStatusCode(r4, 200)
Example #31
 def set_html(self):
     r = requests.get(self.url)
     # Handle deleted checklists
     if r.status_code == 400:
         return None
     return BeautifulSoup(r.text, features="html.parser")
Example #32
 def fetch_max_item_id(self) -> int:
     return requests.get(f"{HN_BASE_URL}/maxitem.json", timeout=5).json()
Example #33
 def get_group(self, group_id):
     try:
         response = requests.get(self.base_url + '/groups/' + str(group_id) + self.notification_url, auth=self.voiceit_basic_auth_credentials, headers=self.headers)
         return response.json()
     except requests.exceptions.HTTPError as e:
         return e.read()
Example #34
def get_resource(resource_id):
    apitext = SERVER + "/resources/" + resource_id + TOKEN
    response = requests.get(apitext)
    return response
Example #35
def index():
    if request.method == 'POST':
        searchString = request.form['content'].replace(
            " ", "")  # obtaining the search string entered in the form
        try:
            dbConn = pymongo.MongoClient(
                "mongodb://localhost:27017/")  # opening a connection to Mongo
            db = dbConn[
                'crawlerDB']  # connecting to the database called crawlerDB
            reviews = db[searchString].find(
                {}
            )  # searching the collection with the name same as the keyword
            if reviews.count(
            ) > 0:  # if there is a collection with searched keyword and it has records in it
                return render_template(
                    'results.html',
                    reviews=reviews)  # show the results to user
            else:
                flipkart_url = "https://www.flipkart.com/search?q=" + searchString  # preparing the URL to search the product on flipkart
                uClient = uReq(
                    flipkart_url)  # requesting the webpage from the internet
                flipkartPage = uClient.read()  # reading the webpage
                uClient.close()  # closing the connection to the web server
                flipkart_html = bs(
                    flipkartPage, "html.parser")  # parsing the webpage as HTML
                bigboxes = flipkart_html.findAll(
                    "div", {"class": "bhgxx2 col-12-12"}
                )  # searching for appropriate tag to redirect to the product link
                del bigboxes[
                    0:
                    3]  # the first 3 members of the list do not contain relevant information, hence deleting them.
                box = bigboxes[0]  #  taking the first iteration (for demo)
                productLink = "https://www.flipkart.com" + box.div.div.div.a[
                    'href']  # extracting the actual product link
                prodRes = requests.get(
                    productLink)  # getting the product page from server
                prod_html = bs(
                    prodRes.text,
                    "html.parser")  # parsing the product page as HTML
                commentboxes = prod_html.find_all('div', {
                    'class': "_3nrCtb"
                })  # finding the HTML section containing the customer comments

                table = db[
                    searchString]  # creating a collection with the same name as search string. Tables and Collections are analogous.
                #filename = searchString+".csv" #  filename to save the details
                #fw = open(filename, "w") # creating a local file to save the details
                #headers = "Product, Customer Name, Rating, Heading, Comment \n" # providing the heading of the columns
                #fw.write(headers) # writing first the headers to file
                reviews = []  # initializing an empty list for reviews
                #  iterating over the comment section to get the details of customer and their comments
                for commentbox in commentboxes:
                    try:
                        name = commentbox.div.div.find_all(
                            'p', {'class': '_3LYOAd _3sxSiS'})[0].text

                    except:
                        name = 'No Name'

                    try:
                        rating = commentbox.div.div.div.div.text

                    except:
                        rating = 'No Rating'

                    try:
                        commentHead = commentbox.div.div.div.p.text
                    except:
                        commentHead = 'No Comment Heading'
                    try:
                        comtag = commentbox.div.div.find_all(
                            'div', {'class': ''})
                        custComment = comtag[0].div.text
                    except:
                        custComment = 'No Customer Comment'
                    #fw.write(searchString+","+name.replace(",", ":")+","+rating + "," + commentHead.replace(",", ":") + "," + custComment.replace(",", ":") + "\n")
                    mydict = {
                        "Product": searchString,
                        "Name": name,
                        "Rating": rating,
                        "CommentHead": commentHead,
                        "Comment": custComment
                    }  # saving that detail to a dictionary
                    x = table.insert_one(
                        mydict
                    )  # inserting the dictionary containing the review comments into the collection
                    reviews.append(
                        mydict)  #  appending the comments to the review list
                return render_template(
                    'results.html',
                    reviews=reviews)  # showing the review to the user
        except:
            return 'something is wrong'
            #return render_template('results.html')
    else:
        return render_template('index.html')
Example #36
def feach(url):
    r = requests.get(url)
    return r.text.replace('\t','')
Example #37
def pastebin_search(args, lookup, reportDir, apiKeyDir):

    userAgent = {'User-agent': 'Mozilla/5.0'}

    #return values
    pasteScrapeUrl = []
    pasteScrapeContent = []
    pasteScrapeResult = []

    # check for empty args
    if args.pastebinsearch is not None:

        for a in args.pastebinsearch:
            #init lists
            scrapeURL = []
            scrapeContent = []

            #iterate the lookup list
            for i, l in enumerate(lookup):

                #init textfiles
                scrapedFile = open(
                    reportDir + l + '/' + l + '_pastebin_content.txt', 'w')
                pasteUrlFile = open(
                    reportDir + l + '/' + l + '_pastebin_urls.txt', 'w')

                # show the user what is being searched
                print '[+] Searching Pastebin for public pastes containing %s' % (
                    l)
                print '[i] May require a Pastebin Pro account for IP whitelisting'

                #run google query code
                try:
                    #iterate url results from search of dork arg and supplied lookup value against pastebin. return top 20 hits
                    for url in search(str(a) + ' ' + str(l) +
                                      ' site:pastebin.com',
                                      stop=20):
                        #delay 1 second to be polite
                        time.sleep(1)
                        #append results together
                        scrapeURL.append(url)
                        if args.verbose is True:
                            print '[+] Paste containing "%s" and "%s" found at: %s' % (
                                a, l, url)
                except Exception:
                    print '[-] Error dorking pastebin URLs, skipping...'
                    pasteScrapeResult.append('Error scraping Pastebin')
                    continue

                for u in scrapeURL:
                    #http://docs.python-guide.org/en/latest/scenarios/scrape/
                    try:
                        page = requests.get(u, headers=userAgent)
                        pasteUrlFile.writelines(u)
                    except:
                        print '[-] Error opening ' + u + ':'
                        pasteScrapeResult.append('Error opening %s' % u)
                        continue

                    #build html tree
                    tree = html.fromstring(page.content)

                    #if verbose spit out url, search term and domain searched
                    if args.verbose is True:
                        print '[+] Looking for instances of %s and %s in %s \n' % (
                            a, l, url)
                    #grab raw paste data from the textarea
                    rawPasteData = tree.xpath(
                        '//textarea[@class="paste_code"]/text()')

                    #search lines for lookup and keyword
                    for line in rawPasteData:
                        #regex for the lookup value (domain) in that line
                        #if re.search((str(l)), line):
                        if str(l) in line:
                            #if the argument search term is in the line
                            if a in line:
                                scrapedFile.writelines(a)

                return pasteScrapeResult
Example #38
def scrape_sites(args, lookup, reportDir, apiKeyDir):
    scrapeResult = []
    userAgent = {'User-agent': 'Mozilla/5.0'}
    a = ''
    indeedResult = []
    githubResult = []
    virusTotalResult = []
    vtApiKey = ''
    vtParams = {}

    if args.scraper is True:
        for i, l in enumerate(lookup):
            scrapeFile = open(reportDir + l + '/' + l + '_scrape.txt', 'w')

            print '[+] Scraping sites using ' + l
            #http://www.indeed.com/jobs?as_and=ibm.com&as_phr=&as_any=&as_not=&as_ttl=&as_cmp=&jt=all&st=&salary=&radius=25&fromage=any&limit=500&sort=date&psf=advsrch
            #init list and insert domain with tld stripped
            #insert lookup value into static urls
            scrapeUrls = {\
            'indeed':'http://www.indeed.com/jobs?as_and=%s&limit=500&sort=date' % (l.split('.')[0]),\
            'github':'https://api.github.com/search/repositories?q=%s&sort=stars&order=desc' % (l.split('.')[0]),#pull off the tld\
            #'glassdoor':'https://www.glassdoor.com/Reviews/company-reviews.htm?suggestCount=0&suggestChosen=false&clickSource=searchBtn&typedKeyword=%s&sc.keyword=%s&locT=&locId=' % (l.split('.')[0],l.split('.')[0]),\
            #'slideshare':'http://www.slideshare.net/%s' % (l.split('.')[0]),\
            'virustotal':'https://www.virustotal.com/vtapi/v2/domain/report',\
            'censys':'https://www.censys.io/api/v1'\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
			#'':''\
            }

            for name, url in scrapeUrls.items():
                # indeed matches jobs. yeah yeah it doesn't use their api yet
                if name == 'indeed':
                    if args.verbose is True:
                        print '[+] Searching job postings on indeed.com for %s:' % l.split(
                            '.')[0]

                    #http://docs.python-guide.org/en/latest/scenarios/scrape/
                    try:
                        ipage = requests.get(url, headers=userAgent)
                    except:
                        print '[-] Scraping error on ' + url + ':'
                        continue

                    #build html tree
                    itree = html.fromstring(ipage.content)

                    #count jobs
                    jobCount = itree.xpath('//div[@id="searchCount"]/text()')
                    print '[+] ' + str(
                        ''.join(jobCount)
                    ) + ' Jobs posted on indeed.com that match %s:' % (
                        l.split('.')[0])
                    jobTitle = itree.xpath(
                        '//a[@data-tn-element="jobTitle"]/text()')
                    indeedResult.append(
                        '\n[+] Job postings on indeed.com that match %s \n\n' %
                        l.split('.')[0])
                    for t in jobTitle:
                        indeedResult.append(t + '\n')

                #github matches search for user supplied domain
                #https://developer.github.com/v3/search/
                #http://docs.python-guide.org/en/latest/scenarios/json/
                if name == 'github':
                    if args.verbose is True:
                        print '[+] Searching repository names on Github for %s' % (
                            l.split('.')[0])

                    #http://docs.python-guide.org/en/latest/scenarios/scrape/
                    try:
                        gpage = requests.get(url, headers=userAgent)
                    except:
                        print '[-] Scraping error on ' + url + ':'
                        continue

                    #read json response
                    gitJson = gpage.json()

                    #grab repo name from json items>index val>full_name
                    githubResult.append('[+] Github repositories matching ' +
                                        (l.split('.')[0]) + '\n\n')
                    for i, r in enumerate(gitJson['items']):
                        githubResult.append(gitJson['items'][i]['full_name'] +
                                            '\n')

                if name == 'virustotal':
                    if not os.path.exists(apiKeyDir + 'virus_total.key'):
                        print '[-] You are missing %s/virus_total.key' % apiKeyDir
                        #vtApiKey=raw_input("Please provide an API Key: ")

                    #read API key
                    try:
                        with open(apiKeyDir + 'virus_total.key',
                                  'r') as apiKeyFile:
                            for k in apiKeyFile:
                                vtApiKey = k
                    except:
                        print '[-] Error opening %s/virus_total.key key file, skipping. ' % apiKeyDir
                        continue

                    if args.verbose is True:
                        print '[+] VirusTotal domain report for %s' % l
                    virusTotalResult.append(
                        '[+] VirusTotal domain report for %s' % l)

                    vtParams['domain'] = l
                    vtParams['apikey'] = vtApiKey

                    #per their api reference
                    response = urllib.urlopen(
                        '%s?%s' % (url, urllib.urlencode(vtParams))).read()

                    #read json response
                    vtJson = json.loads(response)
                    #virusTotalResult.append(vtJson)

            #write the file
            for g in githubResult:
                scrapeFile.writelines(''.join(str(g.encode('utf-8'))))
            for i in indeedResult:
                scrapeFile.writelines(''.join(str(i.encode('utf-8'))))

            scrapeResult.append(indeedResult)
            scrapeResult.append(githubResult)

            #verbosity logic
            if args.verbose is True:
                for gr in githubResult:
                    print ''.join(gr.strip('\n'))
                for ir in indeedResult:
                    print ''.join(ir.strip('\n'))

        return scrapeResult
Example #39
import requests,random,sys,time,os,base64,readline,curses 
from thread import *
os.system("clear")
heder = requests.get("https://raw.githubusercontent.com/tampansky/visitor/master/user-age.txt").text
heder = heder.split("\n")
#FONT COLOR
h = '\033[92m' # green
p = '\033[97m' # white
m = '\033[91m' # red
br = '\033[94m' # blue
y = '\033[1;33m' # yellow
blck = "\033[0;30m" # black
bu = "\033[0;33m" # gray
pur = "\033[0;35m" # purple
cy = "\033[0;36m" # cyan
# FONT TYPE
faint = "\033[2m" # faint 
bol = "\033[1m" # bold
ita = "\033[3m" # italic
nega = "\033[7m" # negativ
under = "\033[4m" # underline
blink = "\033[5m" # blink
end = "\033[0m" # end
cros = "\033[9m" #crossed
print("tunggu sebentar")
time.sleep(1)
os.system("clear")
print("loading.")
time.sleep(1)
os.system("clear")
print("loading..")
Example #40
    def fetch_item_by_id(self, item_id: int) -> Optional[HNItemRecord]:

        item_url = f"{HN_BASE_URL}/item/{item_id}.json"
        item = requests.get(item_url, timeout=5).json()
        return item
Example #41
def requete(url):
    try:
        r = requests.get(url)
        return url, r.status_code
    except:
        pass
Example #42
def jsonToData(url):
    #Calling API
    response = requests.get(url)
    classicBox = json.loads(response.text)
    #JSON to pandas dataframe
    return pd.json_normalize(classicBox['items'],record_path=['courses'])
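As added context (not from the original project): pd.json_normalize with record_path flattens each entry of a nested list into its own row; a minimal sketch with made-up data shaped like the payload the function above expects:

import pandas as pd

# Hypothetical payload: a top-level 'items' list whose entries each carry a nested 'courses' list.
sample = {
    'items': [
        {'id': 1, 'courses': [{'name': 'Soup'}, {'name': 'Salad'}]},
        {'id': 2, 'courses': [{'name': 'Steak'}]},
    ]
}

df = pd.json_normalize(sample['items'], record_path=['courses'])
print(df)   # one row per course: Soup, Salad, Steak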
Example #43
def get_html(url):
    html_pack = requests.get(url)
    return html_pack.text
Example #44
reqheaders = {
    'X-SOCIALEDGE-ID': '3da9ff7e400b1d535b0115ca064dee02',
    'Content-Type': "application/json"
}

Path("./datas/list").mkdir(parents=True, exist_ok=True)

with open("./listIds") as fp:
    line = fp.readline()
    while line:
        id = line.strip()
        print(id)
        with open("./datas/list/{}.json".format(id), "w") as wfp:
            skip = 0
            count = 0
            while True:
                r = requests.get(
                    "https://api.creatoriq.com/api/view?params[ListId]={}&output=cs&view=List/List&skip={}&take=2000"
                    .format(id, skip),
                    headers=reqheaders)
                if r.status_code != 200:
                    raise Exception("error")
                data = json.loads(r.text)
                if len(data["results"]) == 0:
                    break
                for record in data["results"]:
                    wfp.write(json.dumps(record) + "\n")
                skip += 2000
        line = fp.readline()
Example #45
 def get_token(self):
     r=requests.get('https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid='
                    'ww65766ff6db64205d&corpsecret=nC_218vYvuzeQQ_DlrJ-_zik9NQI_FVJblR4k8RW-Lo')
     token=r.json()['access_token']
     return token
Example #46
import requests
from pprint import pprint
# json -> dictionary
url = 'https://api.bithumb.com/public/ticker/btc'

data = requests.get(url).json()['data']
pprint(data)

# JSON values are strings, so cast them to int before doing arithmetic
fluctuation = int(data['max_price']) - int(data['min_price'])
if int(data['opening_price']) + fluctuation >= int(data['max_price']):
    print('상승장')  # "bull market"
else:
    print('하락장')  # "bear market"
Example #47
def index():
    response = requests.get("http://127.0.0.1:5000/api/provincias")
    return render_template('provincias.html', data=response.json())
Example #48
accept_language = config.get('header', 'Accept-Language')
content_type = config.get('header', 'Content-Type')
auth = config.get('header', 'Authorization')

USER_URL = host + ":" + port + "/" + user_api

kylin_headers = {
    'Accept': str(accept),
    'Accept-Language': str(accept_language),
    'Authorization': str(auth),
    'Content-Type': str(content_type)
}

print "Sending URL ==> " + USER_URL + "\n"

response = requests.get(USER_URL, headers=kylin_headers)

print "Response received, validating response \n"

if response.status_code != 200:
    print('Error : ' + str(response.status_code))
    exit()
else:
    print('Status Code : ' + str(response.status_code) + ' Status : SUCCESS')

print "Response : \n"
print(response.json())
print "  \n"

print "code : " + response.json()['code'] + "\n"
print "msg : " + response.json()['msg'] + "\n"
Example #49
def simulate(l, b, beamwidth=0.6, v_min=-400, v_max=400, plot_file=''):
	'''
	Simulate 21 cm profiles based on the LAB HI Survey.

	Args:
		l: float. Target galactic longitude [deg]
		b: float. Target galactic latitude [deg]
		beamwidth: float. Telescope half-power beamwidth (approx. equal to 0.7 * lambda/D) [deg]
		v_min: float. Minimum radial velocity (xlim) [km/s]
		v_max: float. Maximum radial velocity (xlim) [km/s]
		plot_file: string. Output plot filename
	'''
	import requests
	import matplotlib
	import matplotlib.pyplot as plt

	if plot_file != '':
		plt.rcParams['figure.figsize'] = (9,7)
	plt.rcParams['legend.fontsize'] = 14
	plt.rcParams['axes.labelsize'] = 14
	plt.rcParams['axes.titlesize'] = 16
	plt.rcParams['xtick.labelsize'] = 12
	plt.rcParams['ytick.labelsize'] = 12

	# Establish velocity limits
	if v_min < -400:
		v_min = -400
	if v_max > 400:
		v_max = 400

	# Download LAB Survey HI data
	try:
		response = requests.get('https://www.astro.uni-bonn.de/hisurvey/euhou/LABprofile/download.php?ral='+str(l)+'&decb='+str(b)+'&csys=0&beam='+str(beamwidth))
	except requests.exceptions.ConnectionError:
		raise requests.exceptions.ConnectionError('Failed to reach astro.uni-bonn.de. Make sure you are connected to the internet and try again.')

	data = response.content

	# Parse data
	data = data.splitlines()
	data = data[4:]
	#data = [' '.join(line.split()).replace('\n', '') for line in data]

	frequency = []
	spectrum = []
	for line in data:
		try:
			frequency.append(float(line.split()[2]))
			spectrum.append(float(line.split()[1]))
		except IndexError:
			break

	# Convert km/s to m/s
	v_min = v_min*1000
	v_max = v_max*1000

	# Define Frequency limits
	left_frequency_edge = 1420.4057517667 + 1420.4057517667e6 * -v_max/(299792458 * 1e6)
	right_frequency_edge = 1420.4057517667 + 1420.4057517667e6 * -v_min/(299792458 * 1e6)

	# Limit galactic coordinates to 2 decimal places
	l = float('%.2f' % l)
	b = float('%.2f' % b)

	# Initiate plot
	fig, ax = plt.subplots()

	try:
		plt.title('Simulated HI Profile $(l$=$'+str(l)+'\degree$, $b$=$'+str(b)+'\degree)$ | Beamwidth: $'+str(beamwidth)+'\degree$', pad=40)
	except: # Catch missing TeX exception
		plt.title('Simulated HI Profile (l='+str(l)+' deg, b='+str(b)+' deg) | Beamwidth: '+str(beamwidth)+' deg', pad=40)

	# Plot data
	ax.plot(frequency, spectrum, label='LAB Survey')
	ax.set_xlabel('Frequency (MHz)')
	ax.set_ylabel('Brightness Temperature (K)')
	ax.set_xlim(left_frequency_edge, right_frequency_edge)
	ax.ticklabel_format(useOffset=False)
	ax.legend(loc='upper left')

	# Set secondary axis for Radial Velocity
	ax_secondary = ax.twiny()
	ax_secondary.set_xlabel('Radial Velocity (km/s)', labelpad=5)
	ax_secondary.axvline(x=0, color='brown', linestyle='--', linewidth=2, zorder=0)

	ax_secondary.set_xlim(v_max/1000, v_min/1000)
	ax_secondary.tick_params(axis='x', direction='in', pad=2)
	ax.grid()

	if plot_file != '':
		# Save plot to file
		plt.savefig(plot_file, bbox_inches='tight', pad_inches=0.1)
	else:
		# Display plot
		plt.show()
	plt.clf()
	plt.close()
Example #50
def download_volume(manga_to_download, volume, path_to_download_to, loading_bar, app_root, label, button, button_vol):
    if path_to_download_to == "":
        print("No argument was given for path_to_download_to")
        return
    dirpath = tempfile.mkdtemp()
    print(dirpath)
    button.config(state="disabled")
    button_vol.config(state="disabled")
    # If there is no connection, display an error
    try:
        merger = PdfFileMerger()
        chapter_list = get_volume_list(manga_to_download, True)[int(volume) - 1]
        for i in range(len(chapter_list)):
            r = requests.get("https://guya.moe/api/download_chapter/" + manga_to_download + "/" + chapter_list[i].replace(".", "-") + "/", stream=True)
            file_size = r.headers.get("content-length")

            with open(dirpath + "/chapter.zip", "wb") as file:
                if file_size is None:
                    print("No file size header found, cannot display progress")
                    file.write(r.content)
                else:
                    downloaded_data = 0
                    file_size = int(file_size)
                    for data in r.iter_content(chunk_size=32768):
                        downloaded_data += len(data)
                        file.write(data)
                        progress = int(100 * downloaded_data / file_size)
                        loading_bar["value"] = progress
                        label.configure(text=f"{str(progress)}% ({i + 1}/{len(chapter_list)})")
                        app_root.update_idletasks()
                        app_root.update()

            # Extract the zip file
            with zipfile.ZipFile(dirpath + "/chapter.zip", 'r') as zip_ref:
                zip_ref.extractall(dirpath)
            # Create the PDF file
            file_path = dirpath + f"/{chapter_list[i].replace('.', '-')}.pdf"
            pdf_maker.make_pdf(dirpath, file_path)
            # Append the created file to the volume
            print(f"Appended file {file_path}")
            merger.append(file_path)
    except Exception as e:
        mbox.showerror("An error occurred", "Unable to establish a connection, check your internet settings")
        print("Error: " + str(e))
        return

    if not path_to_download_to.endswith(".pdf"):
        path_to_download_to += ".pdf"
    '''
    Tried to make this a function in pdf_maker.py, but the PDF came out sorted badly
    (e.g. chapter 1, then 10-5, then 10, and only then 2), so the PDF files are appended
    right when they are created instead.
    '''
    merger.write(path_to_download_to)
    merger.close()

    shutil.rmtree(dirpath)
    label.configure(text="Ready")
    loading_bar["value"] = 0
    button.config(state="normal")
    button_vol.config(state="normal")
Example #51
def get_info():
    html = requests.get(url).text
    json_dict = json.loads(html)
    return json_dict
Example #52
# ch21_27.py
import bs4, requests, re

url = 'http://www.taiwanlottery.com.tw'
html = requests.get(url)
print("網頁下載中 ...")
html.raise_for_status()                             # 驗證網頁是否下載成功                      
print("網頁下載完成")

objSoup = bs4.BeautifulSoup(html.text, 'lxml')      # build a BeautifulSoup object

dataTag = objSoup.select('.contents_box02')         # find elements whose class is contents_box02
print("串列長度", len(dataTag))                      # "list length"
for i in range(len(dataTag)):                       # list the elements that contain contents_box02
    print(dataTag[i])

pattern=r'\d+/\d+/\d+'
# find the balls for the drawn order and the size order
balls = dataTag[0].find_all('div', {'class':'ball_tx ball_green'})
date = dataTag[0].find('span', {'class':'font_black15'})
datelist=re.findall(pattern,str(date))
print('威力彩開獎 日期: ' + datelist[0])              # "Super Lotto draw date: ..."
print("開出順序 : ", end='')                         # "drawn order"
for i in range(6):                                  # the first 6 balls are the drawn order
    print(balls[i].text, end='   ')

print("\n大小順序 : ", end='')
for i in range(6,len(balls)):                       # 第7球以後是大小順序
    print(balls[i].text, end='   ')

# find the red ball of the second zone
Example #53
def browse_types():
    types = get(pub_url+"/lookup/publicationtypes", verify=verify_cert).json()
    return render_template('pubswh/browse_types.html', types=types)
Example #54
def thumb(char, ts=ts, api_key=api_key, hash_key=hash1):
    url = "http://gateway.marvel.com/v1/public/characters?ts={}&apikey={}&hash={}&nameStartsWith={}".format(
        ts, api_key, hash_key, char)
    response = requests.get(url).json()
    image = response['data']['results'][0]['thumbnail']['path'] + "/detail.jpg"
    return image
Example #55
# coding=utf-8
# author=yphacker

import re
import bz2
import requests

url = 'http://www.pythonchallenge.com/pc/def/integrity.html'
content = requests.get(url).content#.decode('utf-8')
un = re.findall(b"un: '([^']+)'", content)[0]
pw = re.findall(b"pw: '([^']+)'", content)[0]
print(un)
print(pw)

print('username:'******'unicode_escape').encode('latin1')))
print('password:'******'unicode_escape').encode('latin1')))
Example #56
def get_coords():
    ''' Get coordinates based on the script's IP address '''
    response = requests.get("http://ipinfo.io").json()
    lat, lon = response["loc"].split(',')
    return lat, lon
Example #57
 def get_statuses(self):
     response = requests.get(self.get_url('/issue_statuses.json'),
                             data={'key': self.config['api_key']})
     return json.loads(response.text)['issue_statuses']
Example #58
def main():

    day, current_time = get_date_time()
    full_url = build_url(day)

    try:
        response = requests.get(full_url)

        if response.status_code == 200:
            vendors_data = response.json()
            vendors_data.sort(key = lambda x: x["applicant"]) # sort by the 'applicant' field (assuming this is the name of the Food Truck Vendor)

            index = 0 # the index of the current Vendor we are looking at
            vendors_displayed_so_far = 0
            
            display_more = True
            
            while display_more:
                try: # use a try-except statement to avoid checking the length of the entire response
                    if vendors_displayed_so_far < 10:
                        if vendors_data[index]["start24"] <= current_time <= vendors_data[index]["end24"]: # if the current time is in between a vendor's opening and closing hours
                            print("{} {}".format(vendors_data[index]["applicant"], vendors_data[index]["location"]))
                            vendors_displayed_so_far += 1
                        index += 1
                    else:
                        ask_for_input = True
                        while ask_for_input:
                            user_input = input("\nShow more rows? Enter 'Yes' or 'No' ")
                            
                            if user_input.lower() == "yes":
                                ask_for_input = False
                                vendors_displayed_so_far = 0
                            elif user_input.lower() == "no":
                                ask_for_input = False
                                display_more = False
                                
                            else:
                                print("\nYou didn't enter 'Yes' or 'No'. Try again!")
                except IndexError as e: # there are no more Food Trucks to display
                    print("\nNo more restauraunts to find. That's it!\n")
                    break
        else:
            sys.exit("Server responded with status code {} instead of 200.".format(response.status_code))

    # Exception Handling
    
    except requests.exceptions.HTTPError as e:
        print("An HTTP Error occurred with the following message: {}".format(e))
        sys.exit(1)
    except requests.exceptions.ConnectionError as e:
        print("A Connection Error occurred with the following message: {}".format(e))
        sys.exit(1)
    except requests.exceptions.ConnectTimeout as e:
        print("A Connect Timeout Error (request timed out while trying to connect to the server) occurred with the following message: {}".format(e))
        sys.exit(1)
    except requests.exceptions.ReadTimeout as e:
        print("A Read Timeout Error (server did not send any data in the allotted amount of time) occurred with the following message: {}".format(e))
        sys.exit(1)
    except requests.exceptions.URLRequired as e:
        print("A URL Required Error (invalid URL) occurred with the following message: {}".format(e))
        sys.exit(1)
    except requests.RequestException as e:
        print("An ambiguous Request Exception Error occurred: {}".format(e))
        sys.exit(1)
Example #59
def cnnTimer():
    # while True:
        r = requests.get('https://www.googleapis.com/youtube/v3/search?part=snippet&fields=items/snippet/description,items/id/videoId&channelId=' + CNN_CHANNEL_ID + '&maxResults=1&order=date&type=video&key=AIzaSyAjM0NQVnhEvRY15_bhib3y1m0ilQjdjx0')
        description = json.loads(r.text)
        id = description["items"][0]["id"]["videoId"]
        print(id)
Example #60
 def get_task(self, task_id):
     response = requests.get(self.get_url('/issues/{}.json'.format(task_id)),
                             data={'key': self.config['api_key']})
     return json.loads(response.text)['issue']