Example #1
 def test_content_and_cookies(self):
     s = requests.session()
     def js(url):
         return json.loads(s.get(url).text)
     r1 = js('http://httpbin.org/cookies/set/test1/test2')
     with requests_cache.disabled():
         r2 = js('http://httpbin.org/cookies')
     self.assertEqual(r1, r2)
     r3 = js('http://httpbin.org/cookies')
     with requests_cache.disabled():
         r4 = js('http://httpbin.org/cookies/set/test3/test4')
     # from cache
     self.assertEqual(r3, js('http://httpbin.org/cookies'))
     # updated
     with requests_cache.disabled():
         self.assertEqual(r4, js('http://httpbin.org/cookies'))
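All of the examples below share the same pattern: a cache is installed globally (or on a session), and requests_cache.disabled() is used as a context manager to force individual requests onto the network. A minimal sketch of that pattern, assuming the default sqlite backend and using httpbin.org purely for illustration:

import requests
import requests_cache

requests_cache.install_cache('demo_cache')         # cache all requests.* calls

r1 = requests.get('http://httpbin.org/get')        # first call hits the network
r2 = requests.get('http://httpbin.org/get')        # second call is served from cache
print(getattr(r2, 'from_cache', False))            # True

with requests_cache.disabled():                    # caching is bypassed inside the block
    fresh = requests.get('http://httpbin.org/get')
print(getattr(fresh, 'from_cache', False))         # False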
Example #2
    def __login(self):
        '''Need to login to appcelerator'''
        payload = {'login':self.user, 'password':self.password}

        with requests_cache.disabled():
            r = requests.post(ACS_URLS['login'].format(self.key), data=payload)
            self.cookies = r.cookies
Example #3
def get_coinmarketcap_current_price(from_currency, to_currency,
                                    coinmarketcap_id_dict,
                                    coinmarketcap_session):
    coinmarketcap_id = coinmarketcap_id_dict.get(from_currency.upper())

    if coinmarketcap_id:
        to_currency = to_currency.upper()
        try:
            with requests_cache.disabled():
                response = get_request(
                    coinmarketcap_session,
                    coinmarketcap_api_base_url + 'ticker/' +
                    str(coinmarketcap_id) + '/?convert=' + to_currency)
        except Exception:
            print_error_message_and_exit(
                'The program encountered an error while trying to retrieve current prices from the CoinMarketCap.com API.  Please try running the program again later.'
            )
        current_price = float(
            response.json()['data']['quotes'][to_currency]['price'])
    else:
        print(
            '\n' + 'CoinMarketCap does not have the current price for ' +
            from_currency +
            '.  The currency will have a current value of zero in the output file.'
        )
        current_price = 0
    time.sleep(1)
    return current_price
Example #4
    def download_informe_mensal(file_name):
        """
        Download do arquivo csv com informe mensal.

        Parametros:
            file_name     (str): Informe file name

        Return:
            (str): path local file
        """
        url = "{}/{}".format(URL_INFORME_DIARIO, file_name)
        local_file = "{}/{}".format(CSV_FILES_DIR, file_name)
        print("downloading url: ", url, os.getpid())

        if os.path.exists(local_file):
            print("Local file already exist: ", file_name)
            return local_file

        with requests_cache.disabled():
            res = download_file(url, local_file)
        if res.status_code == 404:
            print("File not found on cvm site: ", url)
        elif res.status_code == 200:
            print("File downloaded successfully: ", file_name)
            return local_file
        else:
            print("download resposnse: %s", res)

        return
Example #5
 def push_to_ids_at_channel(self, channel, ids, message):
     logger.debug("Pushing {0} to {1}".format(message, channel))
     string_ids = ",".join(ids)
     payload = {'channel':channel, 'to_ids':string_ids, 'payload':json.dumps({'badge':2, 'sound':'default', 'alert':message})}
     url = ACS_URLS['notify'].format(self.key)
     with requests_cache.disabled():
         r = requests.post(url, data=payload, cookies=self.cookies)
Example #6
 def get_open(self):
     with requests_cache.disabled():
         price_text = requests.get('http://finance.yahoo.com/q?s=%s&ql=1' % self.symbol).text
         print(self.symbol)
         start = price_text.find('<span class="time_rtq_ticker">')
         end = price_text.find('</span>', start)
         self.open_price = float(price_text[start+50+len(self.symbol):end])
Example #7
File: gsdl.py Project: looran/gsdl
 def _thread(self, item):
     title = item[0]
     url = item[1]
     description = item[2]
     self._parse(url, title)
     self._parse(url, description)
     if url is None or url == "":
         return
     r = None
     headers = { 'User-Agent': "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)" }
     try:
         r = requests.get(url, headers=headers, allow_redirects=True, verify=False, timeout=20)
     except Exception:
         try:
             with requests_cache.disabled():
                 r = requests.get(url, headers=headers, allow_redirects=True, verify=False, timeout=20)
         except Exception:
             pass
     if not r or r.status_code != 200:
         self.dlerrors_count += 1
         if r:
             print("ERROR: %s (%d)" % (url, r.status_code))
         else:
             print("ERROR: %s" % (url))
         return
     self._parse(url, r.text)
Example #8
 def login(self, login_details):
     with requests_cache.disabled():
         login = self.session.get('https://archiveofourown.org/users/login')
         soup = BeautifulSoup(login.text, 'html5lib')
         form = soup.find(id='new_user')
         post = {
             'user[login]': login_details[0],
             'user[password]': login_details[1],
             # standard fields:
             'user[remember_me]': '1',
             'utf8': form.find(attrs={'name': 'utf8'})['value'],
             'authenticity_token': form.find(attrs={'name': 'authenticity_token'})['value'],
             'commit': 'Log In',
         }
         # I feel the session *should* handle this cookies bit for me. But
         # it doesn't. And I don't know why.
         self.session.post(self._join_url(login.url,
                                          str(form.get('action'))),
                           data=post,
                           cookies=login.cookies)
         logger.info("Logged in as %s", login_details[0])
Example #9
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    job_config = marathon_tools.load_marathon_service_config(
        service=service,
        instance=instance,
        cluster=load_system_paasta_config().get_cluster(),
        soa_dir=context.soa_dir,
    )
    app_id = job_config.format_marathon_app_dict()['id']

    with requests_cache.disabled():
        tasks, output = marathon_serviceinit.status_marathon_job(
            service=service,
            instance=instance,
            cluster=load_system_paasta_config().get_cluster(),
            soa_dir=context.soa_dir,
            dashboards=None,
            normal_instance_count=normal_instance_count,
            clients=context.marathon_clients,
            job_config=job_config,
            desired_app_id=app_id,
            verbose=0,
        )
    assert status in output, f"{status!r} not found in {output!r}"
Example #10
def requestCheck(phoneNumber, company, data, s):
    try:
        with requests_cache.disabled():
            domain = company['domains'][companyIndex]
            post_url = domain + company['post_url']

            response = s.post(post_url, data=data)
            try:
                responseJson = json.loads(response.text)

                if responseJson['Code'] != 0:
                    with open("./results/results.csv", "a") as f:
                        f.write(str(phoneNumber) + "\n")
                    return True
                else:
                    return False
            except Exception as e:
                logging.error("An error occurred with json parse: " +
                              phoneNumber + " - " + domain + " - " + str(e))
    except Exception as e:
        logging.error("An error occurred: " + phoneNumber + " - " + str(e))
        return False
Example #11
def wait_for_app_to_launch_tasks(client,
                                 app_id,
                                 expected_tasks,
                                 exact_matches_only=False):
    """ Wait for an app to have num_tasks tasks launched. If the app isn't found, then this will swallow the exception
    and retry. Times out after 30 seconds.

    :param client: The marathon client
    :param app_id: The app id to which the tasks belong
    :param expected_tasks: The number of tasks to wait for
    :param exact_matches_only: a boolean indicating whether we require exactly expected_tasks to be running
    """
    found = False
    with requests_cache.disabled():
        while not found:
            try:
                found = app_has_tasks(client, app_id, expected_tasks,
                                      exact_matches_only)
            except NotFoundError:
                pass
            if found:
                time.sleep(3)  # Give it a bit more time to actually launch
                return
            else:
                paasta_print("waiting for app %s to have %d tasks. retrying" %
                             (app_id, expected_tasks))
                time.sleep(0.5)
Example #13
    def get_html(self, url: str):
        """Gets the HTML of a webpage using requests, decode as UTF-8 and handle any HTTP errors"""
        if self.cache:
            try:
                # GET the webpage
                request = requests.get(url)
                html = request.content.decode('utf-8')

            # Handle any other errors
            except Exception:
                print(f"URL error for {url}")
                return None
            return html
        else:
            with requests_cache.disabled():
                try:
                    # GET the webpage
                    request = requests.get(url)
                    html = request.content.decode('utf-8')

                # Handle any other errors
                except Exception:
                    print(f"URL error for {url}")
                    return None
            return html
Example #14
 def get_likes(self):
     with requests_cache.disabled():
         favors = requests.get(Leetcode.LEETCODE_QUESTIONS,
                               headers=self.headers).json()
         favors = favors['favorites']
         favors = favors['public_favorites'] + favors['private_favorites']
         return set(Q['id'] for favor in favors for Q in favor['questions'])
Example #15
def command(ctx, anime_url, episode_range, url, player, skip_download, quality,
            force_download, download_dir, file_format, provider,
            external_downloader, chunk_size, disable_ssl, fallback_qualities,
            choice):
    """ Download the anime using the url or search for it.
    """
    util.print_info(__version__)
    # TODO: Replace by factory
    cls = get_anime_class(anime_url)

    disable_ssl = (cls and cls.__name__ == 'Masterani') or disable_ssl
    session.get_session().verify = not disable_ssl

    if not cls:
        anime_url = util.search(anime_url, provider, choice)
        cls = get_anime_class(anime_url)

    anime = cls(anime_url,
                quality=quality,
                fallback_qualities=fallback_qualities)
    logger.info('Found anime: {}'.format(anime.title))

    animes = util.parse_ep_str(anime, episode_range)

    # TODO:
    # Two types of plugins:
    #   - Anime plugin: Pass the whole anime
    #   - Ep plugin: Pass each episode
    if url or player:
        skip_download = True

    if download_dir and not skip_download:
        logger.info('Downloading to {}'.format(os.path.abspath(download_dir)))

    for episode in animes:
        if url:
            util.print_episodeurl(episode)

        if player:
            util.play_episode(episode, player=player)

        if not skip_download:
            if external_downloader:
                logger.info('Downloading episode {} of {}'.format(
                    episode.ep_no, anime.title))
                util.external_download(external_downloader,
                                       episode,
                                       file_format,
                                       path=download_dir)
                continue
            if chunk_size is not None:
                chunk_size *= 1e6
                chunk_size = int(chunk_size)
            with requests_cache.disabled():
                episode.download(force=force_download,
                                 path=download_dir,
                                 format=file_format,
                                 range_size=chunk_size)
            print()
Example #16
def spinRoulette():
    # Disabling cache on this call because the whole point is to be a random choice each load.
    with requests_cache.disabled():
        headers = {'X-ListenAPI-Key': LNKEY}
        url = LNURL + 'just_listen'
        response = requests.request('GET', url, headers=headers)
        # print(response.from_cache)
        return response.text
Example #17
def get_proxy():
    try:
        with requests_cache.disabled():
            ct = requests.get("http://127.0.0.1:5010/get/").content
            return ct
    except Exception as e:
        print("代理失效")
        return b"no proxy!"
Example #18
 def execute (self, program, cache=False):
     """ Execute a program - a list of statements. """
     ast = None
     if cache:
         requests_cache.install_cache('demo_cache',
                                      allowable_methods=('GET', 'POST', ))
     else:
         # disabled() is only effective as a context manager; calling it bare
         # is a no-op, so remove the globally installed cache instead.
         requests_cache.uninstall_cache()
         
     if isinstance(program, str):
         ast = self.parse (program)
     if not ast:
         raise ValueError (f"Unhandled type: {type(program)}")
     for statement in ast.statements:
         logger.debug (f"execute: {statement} type={type(statement).__name__}")
         statement.execute (interpreter=self)
     return self.context
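Note that requests_cache.disabled() only takes effect when used as a context manager; a bare call merely constructs the context manager and discards it, leaving any installed cache active. A short sketch of the distinction, assuming a globally installed cache:

import requests
import requests_cache

requests_cache.install_cache('demo_cache')

requests_cache.disabled()                      # no-op: nothing enters the context
r = requests.get('http://httpbin.org/get')     # may still be served from cache

with requests_cache.disabled():                # correct: cache bypassed inside the block
    r = requests.get('http://httpbin.org/get')

requests_cache.uninstall_cache()               # alternative: remove the cache for good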
Example #19
def get_projects(project_id_list=None):
    global __ENGINE__
    with requests_cache.disabled():
        if project_id_list is not None:
            projects = [Project(__LIMS__, id=pid) for pid in project_id_list]
        else:
            projects = __LIMS__.get_projects()
    return projects
Example #20
 def test_content_and_cookies(self):
     requests_cache.install_cache(CACHE_NAME, CACHE_BACKEND)
     s = requests.session()
     def js(url):
         return json.loads(s.get(url).text)
     r1 = js(httpbin('cookies/set/test1/test2'))
     with requests_cache.disabled():
         r2 = js(httpbin('cookies'))
     self.assertEqual(r1, r2)
     r3 = js(httpbin('cookies'))
     with requests_cache.disabled():
         r4 = js(httpbin('cookies/set/test3/test4'))
     # from cache
     self.assertEqual(r3, js(httpbin('cookies')))
     # updated
     with requests_cache.disabled():
         self.assertEqual(r4, js(httpbin('cookies')))
Example #21
 def _api_request(self, query, cache_enabled=True):
     request_string = self.API_URL + query
     if cache_enabled:
         req = requests.get(request_string, headers=self.REQUEST_HEADERS)
     else:
         with requests_cache.disabled():
             req = requests.get(request_string, headers=self.REQUEST_HEADERS)
     return req.json()
Example #23
    def close_open_pr(self, pr):
        """Given a PR API response, add a comment and close."""
        log.debug(f"Attempting to close PR: '{pr['html_url']}'")

        # Make a new comment explaining why the PR is being closed
        comment_text = (
            f"Version `{nf_core.__version__}` of the [nf-core/tools](https://github.com/nf-core/tools) pipeline template has just been released. "
            f"This pull-request is now outdated and has been closed in favour of {self.pr_url}\n\n"
            f"Please use {self.pr_url} to merge in the new changes from the nf-core template as soon as possible."
        )
        with requests_cache.disabled():
            comment_request = requests.post(
                url=pr["comments_url"],
                data=json.dumps({"body": comment_text}),
                auth=requests.auth.HTTPBasicAuth(
                    self.gh_username, os.environ["GITHUB_AUTH_TOKEN"]),
            )

        # Update the PR status to be closed
        with requests_cache.disabled():
            pr_request = requests.patch(
                url=pr["url"],
                data=json.dumps({"state": "closed"}),
                auth=requests.auth.HTTPBasicAuth(
                    self.gh_username, os.environ["GITHUB_AUTH_TOKEN"]),
            )
        try:
            pr_request_json = json.loads(pr_request.content)
            pr_request_pp = json.dumps(pr_request_json, indent=4)
        except Exception:
            pr_request_json = pr_request.content
            pr_request_pp = pr_request.content

        # PR update worked
        if pr_request.status_code == 200:
            log.debug("GitHub API PR-update worked:\n{}".format(pr_request_pp))
            log.info(
                f"Closed GitHub PR from '{pr['head']['ref']}' to '{pr['base']['ref']}': {pr_request_json['html_url']}"
            )
            return True
        # Something went wrong
        else:
            log.warning(
                f"Could not close PR ('{pr_request.status_code}'):\n{pr['url']}\n{pr_request_pp}"
            )
            return False
Example #24
 def get_hls_url(self, release_url):
     with requests_cache.disabled():
         doc = grab_xml("https:" + release_url.replace("http:", "").replace("https:", ""))
         video = doc.xpath("//smil:video", namespaces=NS)
         if not video:
             return
         video_url = video[0].attrib["src"]
         return video_url
Example #25
    def auth_query(
            self,
            endpoint,
            params=None,
            cache_bucket='shortterm',
            short_response=True
    ):
        """
        authorized queries here
        """
        ## Are we actually going to cache?
        use_cache = bool(cache_bucket)
        cache_name = os.path.join('/tmp', cache_bucket) if cache_bucket else None

        cache_timeout = self.constants_data['cache_timeouts'].get(cache_bucket, None)

        if not params:
            params = {}
        import requests_cache
        requests_cache.install_cache(cache_name, expire_after=cache_timeout)
        from urllib.parse import urljoin

        headers = {"X-API-KEY": API_KEY}

        if not SESSION:
            sys.stderr.write("Need auth SESSION first\n")
            sys.exit(2)

        ## Make sure this doesn't start with a /
        if endpoint[0] == '/':
            endpoint = endpoint[1:]

        end_url = urljoin(BASE_URL, endpoint)

        if not use_cache:
            with requests_cache.disabled():
                raw_results = SESSION.get(end_url, headers=headers, params=params)
        else:
            raw_results = SESSION.get(end_url, headers=headers, params=params)

        if 'Response' not in raw_results.json():
            sys.stderr.write("Error retreiving: %s\n" % end_url)
            sys.stderr.write("%s\n" % raw_results.json())
            sys.exit(2)

        if short_response:
            return_result = raw_results.json()['Response']['data']
        else:
            return_result = raw_results

        return return_result
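Example #25 derives the expire_after value from a per-bucket timeout table. A minimal standalone sketch of that idea (bucket names and timeout values here are illustrative, not from the original project):

import os
import requests_cache

CACHE_TIMEOUTS = {'shortterm': 300, 'longterm': 86400}   # seconds; illustrative values
bucket = 'shortterm'
requests_cache.install_cache(os.path.join('/tmp', bucket),
                             expire_after=CACHE_TIMEOUTS.get(bucket))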
Example #26
    def get(self, doc_url, cache_disabled=False):
        headers = {'x-api-key': self.api_key}
        url = self.api_url + doc_url

        if cache_disabled:
            with requests_cache.disabled():
                return self.requests.get(url, headers=headers).json()

        return self.requests.get(url, headers=headers).json()
Example #27
    def pending(self) -> bool:
        """Check if a notification is still pending.

        :return: is the notification task in progress or no
        """
        with requests_cache.disabled():
            notification = self.request().json()
        self.data = notification
        return notification["status"] == "in-progress"
Example #28
 def get_auth_token(self):
     path = "/auth/hls/sign?ts=%s&hn=%s&d=android-tablet" % (int(
         time.time()), self.video_key)
     sig = hmac.new(b'android.content.res.Resources', path.encode("utf-8"),
                    hashlib.sha256).hexdigest()
     auth_url = BASE_URL + path + "&sig=" + sig
     with requests_cache.disabled():
         auth_token = grab_text(auth_url)
     return auth_token
Example #29
 async def quote(self, ctx):
     """Get an inspirational quote"""
     # don't cache this page or every quote will be the same
     with requests_cache.disabled():
         response = requests.get("https://zenquotes.io/api/random")
     json_data = json.loads(response.text)
     quote = json_data[0]['q'] + " -" + json_data[0]['a']
     embedQuote = discord.Embed(title="Inspirational Quote", description=quote, color=0x282828)
     await ctx.send(embed=embedQuote)
Example #30
def robust_request(url: str, retry_interval=0.0, retry_max=20):
    response = requests.get(url)
    with requests_cache.disabled():
        retry_count = 0
        while response.status_code == 200 and not response.text and retry_count < retry_max:
            time.sleep(retry_interval)
            response = requests.get(url)
            retry_count += 1
    return response
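A usage sketch for the helper above, assuming a cache was installed earlier and using a hypothetical URL: if the cached copy of the response has an empty body, the retries inside disabled() go to the live server rather than replaying the cached miss.

import requests_cache

requests_cache.install_cache('api_cache')
# hypothetical endpoint; substitute a real one
resp = robust_request('http://example.com/data', retry_interval=0.5, retry_max=5)
print(resp.status_code, len(resp.text))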
Example #31
    def _fetch(self, uri, cache=True):
        if cache:
            with requests_cache.enabled():
                r = requests.get(uri, headers={'Accept' : 'application/json'})
        else:
            with requests_cache.disabled():
                r = requests.get(uri, headers={'Accept' : 'application/json'})

        return r.json()
Example #32
 def _batch():
     if caching_avail and self._cached:
         self._cached = False
         with requests_cache.disabled():
             from_cache, ret = self._get(_url, params=kwargs, verbose=verbose)
         self._cached = True
     else:
         from_cache, ret = self._get(_url, params=kwargs, verbose=verbose)
     return ret
Example #34
 def __do_get(self, url):
     with requests_cache.disabled():
         r = requests.get(url)
         logger.info("Request of weather data to {0}.".format(url))
         try:
             return json.loads(r.text)
         except ValueError:
             logger.warning("Could not get weather data from {0}".format(url))
             return {} # Something went wrong!
Example #35
    def raw_request(self, base_url, endpoint, params, disable_cache=False):
        if disable_cache:
            disable_cache_session = self.session
            with requests_cache.disabled():
                resp = disable_cache_session.get(base_url + endpoint, params=params, timeout=self.request_timeout)
        else:
            resp = self.session.get(base_url + endpoint, params=params, timeout=self.request_timeout)

        return resp.text
Example #36
 def get_submit_list(self, question_slug):
     with requests_cache.disabled():
         data = '{"operationName":"Submissions","variables":{"offset":0,"limit":0,"lastKey":null,"questionSlug":"%s"},"query":"query Submissions($offset: Int!, $limit: Int!, $lastKey: String, $questionSlug: String!) {\\n  submissionList(offset: $offset, limit: $limit, lastKey: $lastKey, questionSlug: $questionSlug) {\\n    lastKey\\n    hasNext\\n    submissions {\\n      id\\n      statusDisplay\\n      lang\\n      runtime\\n      timestamp\\n      url\\n      isPending\\n      memory\\n      __typename\\n    }\\n    __typename\\n  }\\n}\\n"}' % question_slug
         submit_list = [
             item for item in requests.post(
                 Leetcode.LEETCODE_GRAPHQL, headers=self.headers,
                 data=data).json()['data']['submissionList']['submissions']
             if item['statusDisplay'].lower() == 'accepted'
         ]
         return submit_list
Example #37
 async def catfact(self, ctx):
     """Get a random catfact"""
     url = "https://catfact.ninja/facts"
     with requests_cache.disabled():
         response = requests.get(url)
     data = json.loads(response.text)
     fact = data['data'][0]['fact']
     embed = discord.Embed(title="Cat Fact", description=fact)
     embed.set_footer(text="🐱")
     await ctx.send(embed=embed)
Example #38
 def updatePlexList(self, plex):
     with requests_cache.disabled():
         try:
             plex.playlist(self.name).delete()
         except (NotFound, BadRequest):
             logging.error(
                 "Playlist %s not found, so it could not be deleted. Actual playlists: %s"
                 % (self.name, plex.playlists()))
         plex.createPlaylist(self.name, items=self.plex_movies)
Example #39
 def get_problems_translation(self):
     with requests_cache.disabled():
         data = '{\"operationName\":\"getQuestionTranslation\",\"variables\":{},\"query\":\"query getQuestionTranslation($lang: String) {\\n  translations: allAppliedQuestionTranslations(lang: $lang) {\\n    title\\n    questionId\\n    __typename\\n  }\\n}\\n\"}'
         translations = requests.get(
             Leetcode.LEETCODE_GRAPHQL, headers=self.headers,
             data=data).json()['data']['translations']
         return {
             entry['questionId']: entry['title']
             for entry in translations
         }
Example #40
def release_history(channel, os):
    # Query limit is 1000, so to go back far enough we need to query
    # each os/channel pair separately.
    url = RELEASE_HISTORY_CSV_URL + "?os=%s&channel=%s" % (os, channel)
    with requests_cache.disabled():
        history_text = requests.get(url).text
    lines = history_text.strip('\n').split('\n')
    expected_fields = ['os', 'channel', 'version', 'timestamp']
    releases = read_csv_lines(lines, expected_fields)
    return releases
Example #41
 def _get(self, path, params=None, use_cache=True):
     url = '{}{}'.format(BASE_URL, path)
     log.debug(url)
     if use_cache:
         resp = requests.get(url, headers={'Authorization': 'Basic {}'.format(self.api_key)}, params=params)
     else:
         with requests_cache.disabled():
             resp = requests.get(url, headers={'Authorization': 'Basic {}'.format(self.api_key)}, params=params)
     resp.raise_for_status()
     return resp.json()
Example #43
 def subscribe_device(self, channel, device_type, device_id):
     try:
         self.clients[channel].append(device_id)
     except KeyError:
         self.clients[channel] = [device_id]
     finally:
         url = ACS_URLS['subscribe'].format(self.key)
         payload = {'type': device_type, 'device_id': device_id, 'channel': channel}
         with requests_cache.disabled():
             r = requests.post(url, data=payload, cookies=self.cookies)
Example #44
def fetch_csv_from_url(url):
    """ Gets a fresh copy of the Google Forms response file and treats it like a file object.

        In order for this to work, the response sheet must be published to the web as csv and the link must be put in config.py under the variable gforms_url.
    """
    # Cache avoidance.
    with requests_cache.disabled():
        r = requests.get(url)
        if r.status_code == 200:
            return r.iter_lines()
Example #45
 def test_response_history(self):
     r1 = requests.get('http://httpbin.org/redirect/3')
     def test_redirect_history(url):
         r2 = requests.get(url)
         for r11, r22 in zip(r1.history, r2.history):
             self.assertEqual(r11.url, r22.url)
     test_redirect_history('http://httpbin.org/redirect/3')
     test_redirect_history('http://httpbin.org/redirect/2')
     with requests_cache.disabled():
         r3 = requests.get('http://httpbin.org/redirect/1')
         self.assertEqual(len(r3.history), 1)
Example #46
    def fetch(self, uri, cache=True):
        if cache:
            with requests_cache.enabled():
                r = requests.get(uri, headers={'Accept' : 'application/json'})
        else:
            with requests_cache.disabled():
                r = requests.get(uri, headers={'Accept' : 'application/json'})

        # last uri-part is dataset name and dictionary key
        key = uri.split('/')[-1]

        return r.json().get(key)
Example #47
def request_json_resource_cacheless(url, params=None, retry=3, time_between=1):
    with requests_cache.disabled():
        for i in range(retry):
            response = requests.get(url, params=params, headers={'Origin':'http://www.lolesports.com'})
            if response.status_code == 200:
                return response.json(object_pairs_hook=OrderedDict)
            elif response.status_code == 404:
                break
            else:
                time.sleep(time_between)

        raise Exception('Unable to retrieve json resource')
Example #48
 def get_cloud_cover(self, city):
     '''Gets cloud cover in % for the given city'''
     url = API_URLS['weather']['city_search'].format(city)
     logger.info("Requesting cloud cover for {0}.".format(city))
     with requests_cache.disabled():
         r = requests.get(url)
         try:
             result = json.loads(r.text)
         except ValueError:
             logger.warning("Could not get cloud cover for {0}".format(city))
             return '0'
         return result['data']['current_condition'][0]['cloudcover']
Example #49
def fetch_branch_release_times():
    release_times = {}
    # Always grab the most recent release history.
    with requests_cache.disabled():
        history_text = requests.get(RELEASE_HISTORY_CSV_URL).text
    for line in history_text.strip('\n').split('\n'):
        os, channel, version, date_string = line.split(',')
        date = parse_datetime_ms(date_string)
        branch = version.split('.')[2]
        last_date = release_times.get(branch)
        if not last_date or last_date > date:
            release_times[branch] = date
    return release_times
Example #50
def request_api_resource(relative_url, params=None, retry=3, time_between=1):
    url = URL_FORMAT % (LOCAL_HOST, LOCAL_PORT, relative_url)
    with requests_cache.disabled():
        for i in range(retry):
            response = requests.get(url, params=params)
            if response.status_code == 200:
                return response.json(object_pairs_hook=OrderedDict)
            elif response.status_code == 404:
                raise Exception('Bracket Info is Invalid, 404 when retrieving bracket data')
            else:
                time.sleep(time_between)

        raise Exception('Unable to retrieve json resource')
Example #51
def status_marathon_job(context, status, job_id):
    normal_instance_count = 1
    (service, instance, _, __) = decompose_job_id(job_id)
    app_id = marathon_tools.create_complete_config(service, instance, soa_dir=context.soa_dir)['id']

    with requests_cache.disabled():
        output = marathon_serviceinit.status_marathon_job(
            service,
            instance,
            app_id,
            normal_instance_count,
            context.marathon_client
        )
    assert status in output
Example #52
 def get_ticket(self):
     with requests_cache.disabled():
         params = {'service': 'http://umlsks.nlm.nih.gov'}
         headers = {
             'Content-type':'application/x-www-form-urlencoded',
             'Accept': 'text/plain',
             'User-Agent':'python',
         }
         ticket_response = requests.post(
             self.ticket_granter, 
             data=params, 
             headers=headers
         )
         return ticket_response.text
Example #53
def main():
    # Once cached, delayed page will be taken from cache
    # redirects also handled
    for i in range(5):
        requests.get('http://httpbin.org/delay/2')
        r = requests.get('http://httpbin.org/redirect/5')
        print(r.text)

    # And if we need to get fresh page or don't want to cache it?
    with requests_cache.disabled():
        print(requests.get('http://httpbin.org/ip').text)

    # Debugging info about cache
    print(requests_cache.get_cache())
Example #54
 def __init__(self, username, password):
     params = {'username': username, 'password': password}
     headers = {
         'Content-type':'application/x-www-form-urlencoded',
         'Accept': 'text/plain',
         'User-Agent':'python',
     }
     with requests_cache.disabled():
         auth_response = requests.post(
             'https://utslogin.nlm.nih.gov/cas/v1/tickets/',
             data=params,
             headers=headers,
         )
         parser = BeautifulSoup(auth_response.content, 'lxml')
         self.ticket_granter = parser.form['action']
Example #55
    def _get(cls, api, *args,**kwargs):
        cached = kwargs.pop("cached", True)
        auth = None
        if os.getenv('TRAKT_USERNAME') and os.getenv('TRAKT_PASSWORD'):
            auth = (os.getenv('TRAKT_USERNAME'), os.getenv('TRAKT_PASSWORD'))

        if cached:
            response = requests.get(cls._builduri(api, *args), auth=auth).json()
        else:
            with requests_cache.disabled():
                response = requests.get(cls._builduri(api, *args), auth=auth).json()

        if isinstance(response, dict) and response.get('status', False) == 'failure':
            raise TraktException(response.get('error', 'Unknown Error'))
        return response
Example #56
    def test_disabled(self):

        url = httpbin('get')
        requests_cache.install_cache(CACHE_NAME, backend=CACHE_BACKEND, fast_save=FAST_SAVE)
        requests.get(url)
        with requests_cache.disabled():
            for i in range(2):
                r = requests.get(url)
                self.assertFalse(getattr(r, 'from_cache', False))
        with self.s.cache_disabled():
            for i in range(2):
                r = self.s.get(url)
                self.assertFalse(getattr(r, 'from_cache', False))
        r = self.s.get(url)
        self.assertTrue(getattr(r, 'from_cache', False))
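The test above exercises both forms: the module-level requests_cache.disabled() and the per-session cache_disabled(). A minimal sketch of the session-level form, assuming requests_cache.CachedSession is available:

import requests_cache

session = requests_cache.CachedSession('demo_cache')
session.get('http://httpbin.org/get')            # primes the cache
with session.cache_disabled():                   # bypasses this session's cache
    r = session.get('http://httpbin.org/get')
print(getattr(r, 'from_cache', False))           # False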
Example #57
    def request(self, url, usecache=True, extra_headers=None):
        'Make a GET request for url, with or without caching'
        if not url.startswith('http'):
            # relative url
            url = self.rootpath + url
        logging.debug("getting url: %s, usecache=%s, extra_headers=%s", url, usecache, extra_headers)
        if extra_headers is None: extra_headers={}
        if usecache:
            r = self.session.get(url, headers=extra_headers)
        else:
            with requests_cache.disabled():
                r = self.session.get(url, headers=extra_headers)

        if r.status_code in (500,):
            raise JFSError(r.reason)
        return r
Example #58
    def pending(self):
        """ Check if a task is still pending.

        Returns
        -------
        bool
            Is the task still pending completion?
        """
        # Ensure we don't have stale data

        # Disable caching so we get the real response
        with requests_cache.disabled():
            task = self.request()

        self.data = task
        return task["completed"] is None
Example #59
 def getversions(self, **kwargs):
     versions = ()
     kwargs['version'] = 'any'
     req_url = self.__buildURI(mountpoint='/service/local/lucene/search', **kwargs)
     with requests_cache.disabled():
         req = self.reqS.get(req_url)
     if req.status_code == 200:
         data = req.text
     else:
         return versions
     try:
         xml = ElementTree.XML(data)
         for item in ElementPath.findall(xml, './/artifact'):
             if item.find('artifactId').text == kwargs['artifact_id']:
                 versions = versions + (item.find('version').text,)
     except Exception:
         pass
     return versions