Example #1
1
    def test_status_raising(self):
        """A 404 raises HTTPError from raise_for_status(); a 500 is not .ok."""
        not_found = requests.get(httpbin("status", "404"))
        with pytest.raises(requests.exceptions.HTTPError):
            not_found.raise_for_status()

        server_error = requests.get(httpbin("status", "500"))
        assert server_error.ok is False
Example #2
1
    def test_connection_error(self):
        """Connecting to an unknown domain should raise a ConnectionError"""
        unreachable = (
            "http://fooobarbangbazbing.httpbin.org",
            "http://httpbin.org:1",
        )
        for bad_url in unreachable:
            with pytest.raises(ConnectionError):
                requests.get(bad_url)
    def sources(self, url, hostDict, hostprDict):
        """Scrape the site's search results for a single embedded stream source.

        :param url: dict with at least 'title' and 'year', or None
        :param hostDict: hoster domains accepted by source_utils.is_host_valid
        :param hostprDict: premium hosters (unused here; kept for interface)
        :returns: list with one source dict, or [] on any failure
        """
        try:
            sources = []

            # Nothing to resolve without a title payload.
            if url is None:
                return sources

            year = url['year']  # kept: raises KeyError early on malformed input
            h = {'User-Agent': client.randomagent()}
            title = cleantitle.geturl(url['title']).replace('-', '+')
            url = urlparse.urljoin(self.base_link, self.search_link % title)
            # First request: search results page; take the first result item.
            r = requests.get(url, headers=h)
            r = BeautifulSoup(r.text, 'html.parser').find('div', {'class': 'item'})
            r = r.find('a')['href']
            # Second request: the movie page itself.
            r = requests.get(r, headers=h)
            r = BeautifulSoup(r.content, 'html.parser')
            quality = r.find('span', {'class': 'calidad2'}).text
            url = r.find('div', {'class': 'movieplay'}).find('iframe')['src']
            if quality not in ['1080p', '720p']:
                quality = 'SD'

            valid, host = source_utils.is_host_valid(url, hostDict)
            sources.append({'source': host, 'quality': quality, 'language': 'en',
                            'url': url, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            # FIX: was a bare 'except:' and logged a message copy-pasted from a
            # different scraper ("Furk Script: check_api").
            print("Unexpected error in sources:", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return sources
Example #4
0
    def test_where_3_way_logic(self):
        """SQL 3-way logic: a NULL col2 (row r1) never satisfies 'col2' < 2.

        Dataset layout: r1 has col1=1, col2=NULL; r2 has col1=1, col2=2.
        """
        response = requests.get(self.dataset_url)
        datasets = json.loads(response.content)
        self.assertEqual(response.status_code, 200,
            msg="Couldn't get the list of datasets")
        self.assertEqual(datasets['status']['rowCount'], 2,
            msg="2 rows should be in the dataset. r1 and r2")

        query_params = {
            "select": 'col1',
            "where": "'col2' < 2",
        }
        response = requests.get(self.dataset_url + '/query', params=query_params)
        content = json.loads(response.content)
        self.assertEqual(len(content), 0,
            msg="The query should have returned no results")
Example #5
0
    def test_max_timeout(self):
        """--max-timeout caps render timeouts: over-cap requests are rejected
        (400), slow pages hit the gateway timeout (504), fast pages render (200).
        """
        with SplashServer(extra_args=['--max-timeout=0.1']) as splash:
            def render(params):
                # All four cases hit the same endpoint with varying params.
                return requests.get(url=splash.url("render.html"), params=params)

            # Requesting a timeout above the 0.1s cap is rejected outright.
            over_cap = render({'url': self.mockurl("delay?n=1"), 'timeout': '0.2'})
            self.assertStatusCode(over_cap, 400)

            # At the cap, a 1s page cannot finish in time.
            at_cap = render({'url': self.mockurl("delay?n=1"), 'timeout': '0.1'})
            self.assertStatusCode(at_cap, 504)

            # Default timeout is also capped, so the slow page still times out.
            default_timeout = render({'url': self.mockurl("delay?n=1")})
            self.assertStatusCode(default_timeout, 504)

            # A fast page renders fine within the cap.
            fast_page = render({'url': self.mockurl("")})
            self.assertStatusCode(fast_page, 200)
  def _request_odl_data(self, host, node_connector_list, flow_statistics_list):
    """Assemble a dict of OpenDaylight statistics for one host.

    :param host: key into self.hosts_dict (presumably maps a host name to an
        ODL inventory node id -- TODO confirm)
    :param node_connector_list: pre-fetched per-port statistics, passed through
    :param flow_statistics_list: pre-fetched per-flow statistics, passed through
    :returns: dict of raw Response objects / lists, or implicitly None when the
        trailing ConnectionError handler fires -- callers must cope with None

    NOTE(review): Python-2 print statements below; also, 'except
    ConnectionError' names the builtin, which matches requests' exception only
    on requests versions where it subclasses the builtin -- confirm.
    """
    # Data to export from OpenDaylight.
    data_dict = {}

    try:
        # Flow table statistics per host (eg. opendaylight, compute, control and neutron)
        try:
            table_flow_statistics_url = "%s%s%s%s" % (self._odl_inventory_url,'/node/',self.hosts_dict[host],'/table/0/opendaylight-flow-table-statistics:flow-table-statistics')
            table_flow_statistics = requests.get(table_flow_statistics_url)
            table_flow_statistics.raise_for_status()
            data_dict["table_flow_statistics"] = table_flow_statistics
        except requests.exceptions.HTTPError as err:
            print "Can not retrieve flow table statistics:", err
        # Aggregate flow statistics per host (eg. opendaylight, compute, control and neutron)
        # NOTE(review): this failure is silently ignored, unlike the one above
        # -- inconsistent, but deliberate-looking (the print is commented out).
        try:
            aggregate_flow_statistics_url = "%s%s%s%s" % (self._odl_inventory_url,'/node/',self.hosts_dict[host],'/table/0/aggregate-flow-statistics/')
            aggregate_flow_statistics = requests.get(aggregate_flow_statistics_url)
            aggregate_flow_statistics.raise_for_status()
            data_dict["aggregate_flow_statistics"] = aggregate_flow_statistics
        except requests.exceptions.HTTPError as err:
            pass
            #print "Can not retrieve aggregate flow statistics:", err

        # Individual flow statistics per host (eg. opendaylight, compute, control and neutron)
        data_dict["flow_statistics_list"] = flow_statistics_list

        # Port statistics per host (eg. opendaylight, compute, control and neutron)
        data_dict["node_connector_list"] = node_connector_list

        return data_dict

    except ConnectionError:
        print("Error fetching data from OpenDaylight.")
Example #7
0
def updateCCU(v):
  """Push outside/collector temperatures from reading dict *v* to the CCU REST API.

  :param v: dict providing 'outside_temp' and 'collector_temp' (missing keys
      become the string 'None' in the URL, as before)
  Failures are logged via logError and otherwise ignored (best effort).
  """
  ccuUrl = "http://pi:8080/api/set"
  try:
    requests.get(ccuUrl + "/AussenTemp/?value=" + str(v.get('outside_temp')))
    requests.get(ccuUrl + "/KollectorTemp/?value=" + str(v.get('collector_temp')))
  except Exception as e:
    # FIX: 'except Exception, e' is Python-2-only syntax; 'as e' is valid in
    # Python 2.6+ and 3.
    logError(e)
Example #8
0
def login_success(token, profile):
  """Post-OAuth callback: render home for whitelisted users, else revoke the token.

  NOTE(review): the triple-quoted template returned in the else-branch is
  truncated in this capture; its closing quotes lie outside the visible source.
  """
  if profile['email'] in allowed_users:
    return render_template('home.html')
    #return jsonify(token=token, profile=profile)
  else:
    # Not whitelisted: revoke the Google OAuth access token so it cannot be reused.
    requests.get('https://accounts.google.com/o/oauth2/revoke?token='+token['access_token'])
    return """
Example #9
0
  def get_all_messages(self):
    """Fetch every non-system, non-bot, non-command message in the group.

    Pages backwards through the GroupMe API 100 messages at a time using
    before_id, until the reported count is reached or a request fails.
    :returns: list of message dicts
    """
    print("Reading messages...")
    r = requests.get("https://api.groupme.com/v3/groups/"
        + self.gid + "/messages",
        params = {"token": self.key, "limit": 100})
    message_count = r.json()["response"]["count"]

    i = 0
    out = []

    # FIX: 'r.status_code is 200' compared identity, not equality -- it only
    # worked via CPython's small-int cache. Use '=='.
    while r.status_code == 200 and i < message_count:
      progress(i, message_count)
      resp = r.json()["response"]
      messages = resp["messages"]

      for message in messages:
        # Skip system notices and deleted/empty texts.
        if message["system"] or message["text"] is None:
          continue
        if message["sender_type"] == u'bot':
          continue

        # ignore bot commands
        if message["text"].startswith("/bot"):
          continue
        out += [message]

      i += len(messages)

      # FIX: guard against an empty page, which made messages[-1] raise.
      if not messages:
        break
      last_id = messages[-1]["id"]
      r = requests.get("https://api.groupme.com/v3/groups/"
          + self.gid + "/messages",
          params = {"token": self.key, "limit": 100, "before_id": last_id})

    return out
Example #10
0
    def run():
        """A URL whose querystring does not match the registered mock must
        raise ConnectionError instead of hitting the mock."""
        responses.add(
            responses.GET,
            'http://example.com/?test=1',
            match_querystring=True,
        )

        with pytest.raises(ConnectionError):
            requests.get('http://example.com/foo/?test=2')
Example #11
0
def main():
    # Scrape the Hacker News front page for 'deadmark' spans and run hls()
    # over each linked page's text.
    # NOTE(review): 'printf' and 'hls' are not stdlib names -- presumably
    # project helpers defined elsewhere in this file; confirm ('printf' may be
    # a typo for 'print'). BeautifulSoup is called without an explicit parser,
    # so results depend on which parser libraries are installed.
    soup = BeautifulSoup(requests.get('https://news.ycombinator.com/news').content)
    links=soup.find_all('span', attrs={'class':'deadmark'})
    for link in links:
        webpage = link.next_sibling.get('href')
        content = BeautifulSoup(requests.get(webpage).content).body.get_text()
        printf((webpage, hls().get(content))) 
Example #12
0
def do_api_request(api_link, method='GET', params=None):
    """Call a SendGrid API endpoint and return the decoded JSON response.

    :param api_link: full URL of the endpoint
    :param method: 'GET' sends query params; anything else POSTs
    :param params: optional dict of request parameters (never mutated)
    :returns: decoded JSON response, or {} if the request or decoding failed
    """
    # FIX: the old signature used a mutable default (params={}) and then
    # mutated it, leaking the API credentials into the shared default object
    # and into the caller's dict. Copy instead.
    params = dict(params) if params else {}
    # add sendgrid user & api key
    params.update({
            'api_user': settings.get('sendgrid_user'),
            'api_key': settings.get('sendgrid_secret')
    })
    try:
        if method.upper() == 'GET':
            # The old empty-params branch was dead code: params always holds
            # the two credential keys by this point.
            r = requests.get(
                    api_link,
                    params=params,
                    verify=False
            )
        else:
            r = requests.post(
                    api_link,
                    params=params,
                    verify=False
            )
        response = r.json()
    except Exception:
        # FIX: narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        response = {}
    if settings.get('environment') == "dev":
        logging.info("=================")
        logging.info( api_link)
        logging.info( json.dumps(params, indent=4))
        logging.info( response)
        logging.info( "=================")
    return response
Example #13
0
    def deploy_test_app_and_check(self, app, test_uuid):
        """This method deploys the test server app and then
        pings its /operating_environment endpoint to retrieve the container
        user running the task.

        In a mesos container, this will be the marathon user
        In a docker container this user comes from the USER setting
            from the app's Dockerfile, which, for the test application
            is the default, root
        """
        if 'container' in app and app['container']['type'] == 'DOCKER':
            marathon_user = 'root'
        else:
            marathon_user = app.get('user', self.default_os_user)

        def ensure_ok(resp):
            # Shared guard: surface any non-200 reply with full context.
            if resp.status_code != 200:
                msg = "Test server replied with non-200 reply: '{0} {1}. "
                msg += "Detailed explanation of the problem: {2}"
                raise Exception(msg.format(resp.status_code, resp.reason, resp.text))

        with self.deploy_and_cleanup(app) as service_points:
            host = service_points[0].host
            port = service_points[0].port

            uuid_reply = requests.get('http://{}:{}/test_uuid'.format(host, port))
            ensure_ok(uuid_reply)
            assert uuid_reply.json()['test_uuid'] == test_uuid

            env_reply = requests.get('http://{}:{}/operating_environment'.format(
                host, port))
            ensure_ok(env_reply)
            assert env_reply.json() == {'username': marathon_user}
Example #14
0
def hopcam():
    """Render hopcam.html with revmic's dailymotion video titled 'yesterday'."""
    yda_uri = 'https://api.dailymotion.com/videos?owners=revmic&' \
              'search=yesterday&fields=id,title,description,embed_url,' \
              'thumbnail_480_url,views_total'
    search_response = requests.get(yda_uri)
    print(search_response.json())

    try:
        # Get last item in list in case there are multiples (prev delete failed)
        yda_video = get_list(search_response)[-1]
    except IndexError as e:
        yda_video = {'title': "Sorry. Couldn't find yesterday's video :'("}
        print('IndexError - ', e)

    if 'Sorry' in yda_video['title']:  # Try again, rummage through all videos
        print("trying again")
        uri = 'https://api.dailymotion.com/videos?owners=revmic&' \
              'fields=id,title,description,embed_url,' \
              'thumbnail_480_url,views_total'
        videos = get_list(requests.get(uri))
        print(videos)

        # Last matching title wins, mirroring the original scan order.
        for candidate in videos:
            if candidate['title'].lower() == 'yesterday':
                yda_video = candidate

    return render_template('hopcam.html', yesterday=yda_video)
def query(query_term, folder_name, path):
    """Download Google image-search results for *query_term* as JPEGs into
    path/folder_name.

    :param query_term: search string (also used to name the saved files)
    :param folder_name: target directory name (spaces become underscores)
    :param path: parent directory for the target folder
    Unfetchable or non-convertible images are skipped.
    """
    BASE_URL = 'https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q=' + query_term + '&start=%d'

    BASE_PATH = os.path.join(path, folder_name.replace(' ', '_'))

    if not os.path.exists(BASE_PATH):
        os.makedirs(BASE_PATH)
        print("made: " + BASE_PATH)

    start = 0  # start query string parameter for pagination
    while start < 40:   # query 20 pages
        r = requests.get(BASE_URL % start)
        for image_info in json.loads(r.text)['responseData']['results']:
            url = image_info['unescapedUrl']
            try:
                image_r = requests.get(url)
            except ConnectionError as e:
                # FIX: 'except ConnectionError, e' is Python-2-only syntax.
                print('could not download %s' % url)
                continue

            # remove file-system path characters from name
            title = query_term.replace(' ', '_') + '_' + image_info['imageId']
            # FIX: the original left the file close in a dangling 'finally:'
            # (leaking handles) and opened in text mode 'w' for binary data.
            with open(os.path.join(BASE_PATH, '%s.jpg' % title), 'wb') as out_file:
                try:
                    Image.open(StringIO(image_r.content)).save(out_file, 'JPEG')
                except IOError as e:
                    # throw away gifs and stuff
                    print('couldnt save %s' % url)
                    continue
        # NOTE(review): the original's pagination increment was truncated away;
        # stepping by 2 yields the 20 pages the loop comment promises -- confirm
        # against the Google AJAX API's page size.
        start += 2
def downloadXkcd(startComic, endComic):
    """Download xkcd comic images numbered startComic..endComic-1 into ./xkcd.

    :param startComic: first comic number (inclusive)
    :param endComic: end comic number (exclusive, per range())
    :raises requests.HTTPError: if a page or image request fails
    """
    for urlNumber in range(startComic, endComic):
        # download the page
        print('Downloading page http://xkcd.com/%s...' % (urlNumber))
        res = requests.get('http://xkcd.com/%s' % (urlNumber))
        res.raise_for_status()

        # FIX: name the parser explicitly so output doesn't depend on which
        # parser libraries happen to be installed (also silences bs4's warning).
        soup = bs4.BeautifulSoup(res.text, 'html.parser')

        # find the url of the comic image
        comicElem = soup.select('#comic img')
        if comicElem == []:
            print('Could not find comic image.')
        else:
            comicUrl = comicElem[0].get('src')
            # download the image
            print('Downloading image %s...' % (comicUrl))
            res = requests.get(comicUrl)
            res.raise_for_status()

            # FIX: use a context manager so the file is closed even if a chunk
            # read raises mid-download.
            with open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb') as imageFile:
                for chunk in res.iter_content(1000000):
                    imageFile.write(chunk)
Example #17
0
def test_small_layer(svc_url, svc_data):
    """
    Test a service endpoint to see if the layer is small based on some simple rules.

    :param svc_url: The URL pointing to the feature endpoint
    :type svc_url: str
    :param svc_data: A dictionary containing scraped data from an ESRI feature service endpoint
    :type svc_data: dict
    :returns: bool -- True if the layer is considered 'small'
    """
    # FIXME needs refactoring, better error handling and better logic
    global _proxies
    try:
        if svc_data['geometryType'] in ('esriGeometryPoint', 'esriGeometryMultipoint', 'esriGeometryEnvelope'):
            count_query = '/query?where=1%3D1&returnCountOnly=true&f=pjson'
            id_query = '/query?where=1%3D1&returnIdsOnly=true&f=json'
            # Prefer the cheap server-side count when the service supports it.
            r = requests.get(get_base_url(svc_url) + count_query, proxies=_proxies)
            payload = r.json()  # FIX: parse the body once instead of twice per branch
            if 'count' in payload:
                return payload['count'] <= 2000
            # Fall back to counting the object IDs themselves.
            r = requests.get(get_base_url(svc_url) + id_query, proxies=_proxies)
            payload = r.json()
            if 'objectIds' in payload:
                return len(payload['objectIds']) <= 2000
    except Exception:
        # FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. Any failure still just means "not small".
        pass
    return False
Example #18
0
def search():
    """Prompt for a YouTube search, page through up to 5 extra result pages,
    and return the collected video ids.

    :returns: (chunks, stuff, channelTitle, num_results) where chunks splits
        the id list into groups of 50 for batched API calls
    NOTE(review): relies on module-level base_url_search, snip, key and
    gets_video_id(); raw_input makes this Python-2 code.
    """
    query = raw_input('Search: ').replace(" ", "+")
    url = base_url_search + snip + "&q=" + query + "&type=video" + key
    content = json.loads(requests.get(url).text)

    stuff = gets_video_id(content)

    num = 0

    channelTitle = content['items'][0]['snippet']['channelTitle'].capitalize()
    num_results = float(int(content['pageInfo']['totalResults']))

    # FIX: use .get() so the final page (which has no 'nextPageToken' key)
    # ends the loop instead of raising KeyError.
    while content.get('nextPageToken') and num < 5:
        next_page = content['nextPageToken'].encode('UTF8')
        url = base_url_search + snip + "&q=" + query + "&type=video&pageToken=" + next_page + key
        content = json.loads(requests.get(url).text)
        num += 1

        for videos in content['items']:
            if videos['id']['kind'] == 'youtube#video':
                stuff.append(videos['id']['videoId'])

    stuff = [x.encode('UTF8') for x in stuff]
    chunks = [stuff[i:i+50] for i in range(0, len(stuff), 50)]

    return chunks, stuff, channelTitle, num_results
Example #19
0
def get_opendata2_courses():
    """Download every course for every department from the UWaterloo OpenData
    v2 API and dump one JSON file per department under opendata2_courses/.

    NOTE(review): Python 2 (print statement at the end). Also,
    requests.get(...).json is NOT called -- in requests >= 1.0 '.json' is a
    method, so subscripting it ('open_data_json["data"]') would fail; this code
    presumably targets an old requests where .json was a property. Confirm the
    pinned requests version before reusing.
    """
    good_courses = 0

    file_name = os.path.join(os.path.realpath(os.path.dirname(__file__)),
        '%s/opendata2_departments.json' % c.DEPARTMENTS_DATA_DIR)
    with open(file_name) as departments_file:
        departments = json.load(departments_file)

    # Create a text file for every department
    for d in departments:
        department = d['subject']
        open_data_json = requests.get(
                'https://api.uwaterloo.ca/v2/courses/{0}.json?key={1}'.format(
                department.upper(), s.OPEN_DATA_API_KEY)).json
        open_data_catalog_numbers = []

        # Collect the catalog numbers listed for this department.
        for course in open_data_json['data']:
            open_data_catalog_numbers.append(course['catalog_number'])

        # We now poll the individual endpoints of each course for the data
        current_dep_json = []
        course_url = 'https://api.uwaterloo.ca/v2/courses/{0}/{1}.json?key={2}'
        for course in open_data_catalog_numbers:
            good_courses += 1
            json_data = requests.get(course_url.format(department.upper(),
                    course, s.OPEN_DATA_API_KEY)).json
            current_dep_json.append(json_data['data'])

        out_file_name = os.path.join(
                os.path.realpath(os.path.dirname(__file__)),
                'opendata2_courses/%s.json' % department.lower())
        with open(out_file_name, 'w') as courses_out:
            json.dump(current_dep_json, courses_out)

    print 'Found {num} good courses'.format(num=good_courses)
    def get_posts_data(
        self, blog, id=None, get_comments=False, *args, **options
    ):
        """Fetch posts -- or one post's comments -- from a WordPress wp-json API.

        :param blog: blog host or URL (http:// is prepended when missing)
        :param id: post id, used only when get_comments is True
        :param get_comments: True fetches /posts/<id>/comments instead of /posts
        :returns: decoded JSON (a canned fixture when blog_to_migrate is
            "just_testing")
        """
        if self.blog_to_migrate == "just_testing":
            with open('test-data-comments.json') as test_json:
                return json.load(test_json)

        self.url = blog
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        if self.username and self.password:
            # FIX: b64encode returns bytes; without .decode() the header
            # renders as "Basic b'...'" on Python 3.
            auth = b64encode(
                str.encode('{}:{}'.format(self.username, self.password))
            ).decode('ascii')
            headers['Authorization'] = 'Basic {}'.format(auth)
        if self.url.startswith('http://'):
            base_url = self.url
        else:
            base_url = ''.join(('http://', self.url))
        posts_url = ''.join((base_url, '/wp-json/posts'))
        if get_comments is True:
            # FIX: comments_url was computed twice (once unconditionally,
            # once here); the unconditional copy was a dead store.
            comments_url = ''.join((posts_url, '/%s/comments')) % id
            # NOTE(review): the comments request sends no auth headers,
            # unlike the posts request below -- confirm that is intentional.
            fetched_comments = requests.get(comments_url)
            comments_data = self.clean_data(fetched_comments.text)
            return json.loads(comments_data)
        else:
            fetched_posts = requests.get(posts_url, headers=headers)
            data = self.clean_data(fetched_posts.text)
            return json.loads(data)
Example #21
0
    def get_speakers_ip(self, refresh=False):
        """ Get the IP addresses of all the Sonos speakers in the network.

        Code contributed by Thomas Bartvig (thomas.bartvig@gmail.com)

        Arguments:
        refresh -- Refresh the speakers IP cache.

        Returns:
        IP addresses of the Sonos speakers.

        """
        if self.speakers_ip and not refresh:
            return self.speakers_ip

        # Ask the known speaker for the network topology, then probe every
        # address found in it and keep those that answer on /status.
        topology = requests.get('http://' + self.speaker_ip +
                                ':1400/status/topology').text
        candidates = re.findall(r'(\d+\.\d+\.\d+\.\d+):1400', topology)

        for address in candidates:
            probe = requests.get('http://' + address + ':1400/status')
            if probe.status_code == 200:
                self.speakers_ip.append(address)

        return self.speakers_ip
Example #22
0
 def start_analyzer():
     """Walk all wiki pages via the MediaWiki allpages generator, feeding each
     page to WebSurfer.check_page.

     NOTE(review): relies on module-level last_article, root, api_get_url and
     WebSurfer. Bare 'except:' clauses are used as control flow (shrinking
     gaplimit while the API returns an images continue-marker, and ending the
     outer loop when 'gapcontinue' disappears) -- fragile, left as-is.
     """
     flag = 1
     next_article = last_article
     counter = 0
     gaplimit = 500
     while flag == 1:
         gaplimit = 50;
         # information_json = urllib.request.urlopen(root + api_get_url + next_article).read()
         # print(str(information_json)[1:])
         counter = counter + 1
         information = requests.get(root + api_get_url + str(gaplimit) + "&gapfrom=" + next_article).json()
         #print(information)
         try:
             # Re-query with smaller gaplimit while an image continue-marker
             # is present; KeyError (no marker) exits via the except below.
             while gaplimit>9 and information["query-continue"]["images"]["imcontinue"] != "":
                 print(information["query-continue"]["images"]["imcontinue"])
                 gaplimit -= 10
                 if gaplimit == 0:
                     gaplimit = 1
                 information = requests.get(root + api_get_url + str(gaplimit) + "&gapfrom=" + next_article).json()
                 #print(information)
         except:
             gaplimit = 500
         try:
             next_article = information["query-continue"]["allpages"]["gapcontinue"]
         except:
             # No continuation token: this was the last batch of pages.
             flag = 0
         write_or = True
         for page in information["query"]["pages"]:
             if write_or:
                 # Log the first page URL of each batch only.
                 print(information["query"]["pages"][page]["fullurl"])
                 write_or = False
             WebSurfer.check_page(information["query"]["pages"][page])
Example #23
0
 def test_connect_timeout(self):
     """A connect timeout raises ConnectTimeout, which must be a subtype of
     both ConnectionError and Timeout."""
     timed_out = False
     try:
         requests.get(TARPIT, timeout=(0.1, None))
     except ConnectTimeout as e:
         timed_out = True
         assert isinstance(e, ConnectionError)
         assert isinstance(e, Timeout)
     assert timed_out, "The connect() request should time out."
Example #24
0
def print_physiological_state(bb_thr, rr_thr):
    """Retrieve brainbeat and mean-HR metrics from the local nodes and print
    them with a binary mental-workload class."""
    base = 'http://127.0.0.1:8080'

    # Metric request payloads are JSON fragments embedded in the URL path.
    metric_paths = {
        'bb': ('/eegnode/metric/'
               '{"type":"brainbeat",'
               '"channels":["ch0", "ch1"],'
               '"time_window":[15]}'),
        'rr': ('/ecgnode/metric/'
               '{"type":"mean_hr",'
               '"channels":["ch0"],'
               '"time_window":[15],'
               '"arguments":[100]}'),
    }

    # Perform requests
    bb = requests.get(base + metric_paths['bb']).json()[0]['return']
    rr = requests.get(base + metric_paths['rr']).json()[0]['return']

    # Workload class is 1 only when both metrics exceed their thresholds.
    mwl_class = '1' if bb > bb_thr and rr > rr_thr else '0'

    print('%0.2f\t' % bb + '%0.2f\t' % rr + mwl_class)
Example #25
0
    def test_basicauth_with_netrc(self):
        """netrc credentials are picked up by default, and an explicit auth=
        (or session.auth) overrides them.
        """
        auth = ("user", "pass")
        wrong_auth = ("wronguser", "wrongpass")
        url = httpbin("basic-auth", "user", "pass")

        def get_netrc_auth_mock(url):
            return auth

        # FIX: the original monkeypatched the module global and never restored
        # it, leaking the mock into every later test. Save and restore.
        old_get_netrc_auth = requests.sessions.get_netrc_auth
        requests.sessions.get_netrc_auth = get_netrc_auth_mock
        try:
            # Should use netrc and work.
            r = requests.get(url)
            assert r.status_code == 200

            # Given auth should override and fail.
            r = requests.get(url, auth=wrong_auth)
            assert r.status_code == 401

            s = requests.session()

            # Should use netrc and work.
            r = s.get(url)
            assert r.status_code == 200

            # Given auth should override and fail.
            s.auth = wrong_auth
            r = s.get(url)
            assert r.status_code == 401
        finally:
            requests.sessions.get_netrc_auth = old_get_netrc_auth
Example #26
0
def test_short_app_params_4():
    """A master-key-signed request exposes master_key in the app params while
    the plain key stays None."""
    signed_headers = {'x-lc-sign': sign_by_master_key}
    requests.get(url + '/__engine/1/functions/hello', headers=signed_headers)

    app_params = authorization.current_environ['_app_params']
    assert app_params['key'] is None
    assert app_params['master_key'] == TEST_MASTER_KEY
Example #27
0
 def test_unicode_get(self):
     """requests.get must cope with unicode in both param keys and values,
     and in the URL itself."""
     url = httpbin("/get")
     for params in ({"foo": "føø"},
                    {"føø": "føø"},
                    {"føø": "føø"},
                    {"foo": "foo"}):
         requests.get(url, params=params)
     requests.get(httpbin("ø"), params={"foo": "foo"})
def main():
    """Poll the Citibike station feed forever, publishing per-station
    available-bike deltas to redis."""
    seed = requests.get("https://www.citibikenyc.com/stations/json")

    # Make sure every station has a redis key before we start diffing.
    for station in seed.json()["stationBeanList"]:
        conn.setnx(station["stationName"], 0)

    availableBikes = {}
    while True:
        # get the citibike response from their API
        feed = requests.get("https://www.citibikenyc.com/stations/json")
        for station in feed.json()["stationBeanList"]:
            name = station["stationName"]
            bikes = station["availableBikes"]

            # for each station, initialise the store if necessary
            if name not in availableBikes:
                availableBikes[name] = bikes
                continue

            delta = bikes - availableBikes[name]
            # if the number of bikes have changed, emit a message
            if delta != 0:
                stdout.flush()
                conn.set(name, delta)

            # update the store with the new number of available bikes for that station
            availableBikes[name] = bikes

        # set system sleep
        time.sleep(1)
Example #29
0
def extract_nearby_cities(item, country):
    """Build a Spanish phrase listing up to three cities near a quake event."""
    detail = requests.get(item['detail']).json()

    cities_url = detail['properties']['products']['nearby-cities'][0]['contents']['nearby-cities.json']['url']
    nearby = requests.get(cities_url).json()

    out = []
    # Keep at most the first three entries, each "a <dist> km al <dir> de <name>".
    for idx, entry in enumerate(nearby):
        phrase = ('a ' + str(entry['distance']) + ' km al '
                  + translate_string(entry['direction']) + ' de ' + entry['name'])
        out.append(phrase.replace(', ' + country, ''))
        if idx == 2:
            break

    # Join with commas, using "y" before the final city.
    if len(out) > 1:
        out[-1] = 'y ' + out[-1]
    nearby_cities = ', '.join(out)
    nearby_cities = re.sub(', y', ' y', nearby_cities)
    return nearby_cities
Example #30
0
    def on_aaa_response(self, *args):
        """Socket.io 'aaa' event handler: routes message fields to redis, the
        socket connection, and result files on disk.

        Expects args[0] to be a dict; each recognised key triggers an
        independent side effect, so several branches may fire for one message.
        """
        message = args[0]

        if ('socketid' in message):
            # Remember our session id and announce it on the intercomm channel.
            self._redis_obj.publish('intercomm', message['socketid'])
            self._socketid = message['socketid']

        if ('name' in message):
            log('O', message['name'])
            self._socket_io.emit('send_message', self._executable)

        if ('data' in message):
            log('O', message['data'])
            # Sentinel telling intercomm listeners the stream is finished.
            self._redis_obj.publish('intercomm', '***end***')

        if ('picture' in message):
            log('D', message['picture'])
            # Download the referenced image and save it under the session id.
            file = requests.get(message['picture'])

            with open(self._imagepath + '/result' + str(self._socketid) + '.jpg', 'wb') as f:
                f.write(file.content)

            log('D', 'Image Saved: ' + self._imagepath + '/result' + str(self._socketid) + '.jpg')

        if ('mat' in message):
            log('D', message['mat'])
            # NOTE(review): unlike the branch above, _socketid is concatenated
            # without str() here -- works only if it is already a string.
            file = requests.get(message['mat'])
            with open(self._imagepath + '/results' + self._socketid + '.txt', 'wb') as f:
                f.write(file.content)
            log('D', 'Results Saved: ' + self._imagepath + '/results' + self._socketid + '.txt')

        if ('request_data' in message):
            self._socket_io.emit('send_message', 'data')