def worker(self, system_name, result):
        try:
            r = requests.get(
                "http://www.edsm.net/api-v1/system?sysname=%s&coords=1" % urllib.quote(system_name),
                timeout=EDSM._TIMEOUT,
            )
            r.raise_for_status()
            data = r.json()

            if data == -1:
                # System not present - create it
                result["img"] = EDSM._IMG_NEW
                result["done"] = True  # give feedback immediately
                requests.get(
                    "http://www.edsm.net/api-v1/url?sysname=%s" % urllib.quote(system_name), timeout=EDSM._TIMEOUT
                )  # creates system
            elif data.get("coords"):
                self.known(system_name, result)
            else:
                result["img"] = EDSM._IMG_UNKNOWN
        except:
            if __debug__:
                print_exc()
            result["img"] = EDSM._IMG_ERROR
        result["done"] = True
Example #2
    def test_connection_error(self):
        """Connecting to an unknown domain should raise a ConnectionError"""
        with pytest.raises(ConnectionError):
            requests.get("http://fooobarbangbazbing.httpbin.org")

        with pytest.raises(ConnectionError):
            requests.get("http://httpbin.org:1")
Example #3
    def test_status_raising(self):
        r = requests.get(httpbin("status", "404"))
        with pytest.raises(requests.exceptions.HTTPError):
            r.raise_for_status()

        r = requests.get(httpbin("status", "500"))
        assert not r.ok
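For reference, Response.ok is simply "status_code < 400", while raise_for_status() converts 4xx/5xx responses into HTTPError; a standalone illustration against httpbin.org:

import requests

r = requests.get("https://httpbin.org/status/404")
assert not r.ok                   # ok is False for any 4xx/5xx status
try:
    r.raise_for_status()          # the 404 becomes an HTTPError
except requests.exceptions.HTTPError as err:
    print(err)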
Example #4
 def crawl_by_day(self, date_str):
     try:
         for catg in ["newsgn", "newsgj", "newssh"]:
             page = 1
             random_str = random.random()
             headers = {"Referer": self.__referUrl.format(date_str)}
             # front page for the day
             res = requests.get(
                 self.__listUrl.format(random_str, catg, date_str, page), headers=headers, timeout=self.timeout
             )
             jo = res.json()
             responsecode = jo["response"]["code"]
             if responsecode == "0":
                 pagecount = jo["data"]["count"]
                 articles = jo["data"]["article_info"]
                 self.parse_articles_list(articles, date_str)
                 # loop over the remaining pages
                 while page < pagecount:
                     page += 1
                     res = requests.get(
                         self.__listUrl.format(random_str, catg, date_str, page),
                         headers=headers,
                         timeout=self.timeout,
                     )
                     jo = res.json()
                     articles = jo["data"]["article_info"]
                     self.parse_articles_list(articles, date_str)
         self.save_temp_dict()
     except Exception as e:
         logger.error("%s error: %s", date_str, e)
Example #5
def proxy_resource(context, data_dict):
    """ Chunked proxy for resources. To make sure that the file is not too
    large, first, we try to get the content length from the headers.
    If the headers to not contain a content length (if it is a chinked
    response), we only transfer as long as the transferred data is less
    than the maximum file size. """
    resource_id = data_dict["resource_id"]
    log.info("Proxify resource {id}".format(id=resource_id))
    try:
        resource = logic.get_action("resource_show")(context, {"id": resource_id})
    except logic.NotFound:
        base.abort(404, _("Resource not found"))
    url = resource["url"]

    parts = urlparse.urlsplit(url)
    if not parts.scheme or not parts.netloc:
        base.abort(409, detail="Invalid URL.")

    try:
        # first we try a HEAD request which may not be supported
        did_get = False
        r = requests.head(url)
        # Servers can refuse HEAD requests. 405 is the appropriate response,
        # but 400 with the invalid method mentioned in the text, or a 403
        # (forbidden) status is also possible (#2412, #2530)
        if r.status_code in (400, 403, 405):
            r = requests.get(url, stream=True)
            did_get = True
        r.raise_for_status()

        cl = r.headers.get("content-length")
        if cl and int(cl) > MAX_FILE_SIZE:
            base.abort(
                409,
                """Content is too large to be proxied. Allowed
                file size: {allowed}, Content-Length: {actual}.""".format(
                    allowed=MAX_FILE_SIZE, actual=cl
                ),
            )

        if not did_get:
            r = requests.get(url, stream=True)

        base.response.content_type = r.headers["content-type"]
        base.response.charset = r.encoding

        length = 0
        for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
            base.response.body_file.write(chunk)
            length += len(chunk)

            if length >= MAX_FILE_SIZE:
                base.abort(409, headers={"content-encoding": ""}, detail="Content is too large to be proxied.")

    except requests.exceptions.HTTPError as error:
        details = "Could not proxy resource. Server responded with %s %s" % (
            error.response.status_code,
            error.response.reason,
        )
        base.abort(409, detail=details)
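The pattern in this function, probe with HEAD, fall back to a streamed GET when the server rejects HEAD, and cap the bytes actually transferred, is useful outside CKAN as well; a self-contained sketch (the size cap and chunk size are illustrative):

import requests

MAX_FILE_SIZE = 1024 * 1024   # illustrative 1 MB cap
CHUNK_SIZE = 512              # illustrative chunk size

def fetch_capped(url):
    """Return at most MAX_FILE_SIZE bytes from url, probing the size via HEAD first."""
    did_get = False
    r = requests.head(url)
    if r.status_code in (400, 403, 405):   # server refuses HEAD
        r = requests.get(url, stream=True)
        did_get = True
    r.raise_for_status()

    cl = r.headers.get("content-length")
    if cl and int(cl) > MAX_FILE_SIZE:
        raise ValueError("declared content-length %s exceeds cap" % cl)

    if not did_get:
        r = requests.get(url, stream=True)

    body, length = [], 0
    for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
        body.append(chunk)
        length += len(chunk)
        if length >= MAX_FILE_SIZE:
            raise ValueError("response exceeded cap while streaming")
    return b"".join(body)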
Example #6
    def test_iter_lines(self):

        lines = (0, 2, 10, 100)

        for i in lines:
            r = get(httpbin("stream", str(i)), prefetch=False)
            lines = list(r.iter_lines())
            len_lines = len(lines)

            self.assertEqual(i, len_lines)

        # Tests that trailing whitespaces within lines do not get stripped.
        # Tests that a trailing non-terminated line does not get stripped.
        quote = (
            """Agamemnon  \n"""
            """\tWhy will he not upon our fair request\r\n"""
            """\tUntent his person and share the air with us?"""
        )

        # Make a request and monkey-patch its contents
        r = get(httpbin("get"))
        r.raw = StringIO(quote)

        lines = list(r.iter_lines())
        len_lines = len(lines)
        self.assertEqual(len_lines, 3)

        joined = lines[0] + "\n" + lines[1] + "\r\n" + lines[2]
        self.assertEqual(joined, quote)
Example #7
def locate(target):
    # Potential locations.
    # These are some common subfolders that sites might exist in
    # Add to the list as more sites are found... google dork it.
    locations = ["/", "/Search", "/sites/us/en"]
    serv = "_vti_bin/People.asmx"

    # Loop over common locations, trying to ID the service
    for option in locations:
        loc = target[0] + option + serv
        r = requests.get(loc)
        if r.status_code == 200:  # Found it
            print (yellow + "[*] " + endc + "Located People.asmx at: %s" % loc)

            loc = loc + "?WSDL"
            q = requests.get(loc)  # Get the WSDL
            if q.status_code == 200:
                print (yellow + "[*]" + endc + " Located People WSDL at: %s" % loc)
                return loc  # Returns format: "http://www.site.com/_vti_bin/Service.asmx?WSDL"
            else:
                print (red + "[!]" + endc + "Problem locating the WSDL...this shouldn't have happened")
        else:
            continue

    # If we get this far, we couldn't find People.asmx
    print (yellow + "[!]" + endc + "Failed to locate the People.asmx service in common locations.")
    con = raw_input(yellow + "[?]" + endc + "Specify the location manually? (Y/N): ")

    # User-defined URL
    if con.capitalize() == "Y":
        loc = raw_input(cyan + "[?]" + endc + "URL of People.asmx [Format: http://domain.com/People.asmx]:")
        return loc
    else:
        return None
Example #8
def resolve(url):
    try:
        usr = re.compile("/mail/(.+?)/").findall(url)[0]
        vid = re.compile("(\d*)[.]html").findall(url)[0]
        url = "http://videoapi.my.mail.ru/videos/mail/%s/_myvideo/%s.json?ver=0.2.60" % (usr, vid)

        res = requests.get(url)
        result = res.content
        cookie = res.headers["Set-Cookie"]

        u = json.loads(result)["videos"]
        h = "|Cookie=%s" % urllib.quote(cookie)

        url = []
        try:
            url += [[{"quality": "1080p", "url": i["url"] + h} for i in u if i["key"] == "1080p"][0]]
        except:
            pass
        try:
            url += [[{"quality": "HD", "url": i["url"] + h} for i in u if i["key"] == "720p"][0]]
        except:
            pass
        try:
            url += [
                [{"quality": "SD", "url": i["url"] + h} for i in u if not (i["key"] == "1080p" or i["key"] == "720p")][
                    0
                ]
            ]
        except:
            pass

        if not url:
            return
        return url
    except:
        return
Example #9
def get_url_thumbnail(url):
    """Save url's image, if does not exist already locally."""
    # TODO optimization: fetch chunks of data only until the og:image tag is found,
    # same as in the script for suggesting the title.
    try:
        response = requests.get(url)
    except OSError:  # Host might not allow external requests
        return None
    if response.status_code == 200:
        soup = BeautifulSoup(response.content, "html.parser")
        img_has_link = soup.find("meta", {"property": "og:image"})
        img_link = None
        if img_has_link:
            img_link = img_has_link.get("content")
        if img_link is not None:
            img_name = basename(img_link)
            destination = current_app.static_folder + "/img/" + img_name
            if not isfile(destination):
                img_response = requests.get(img_link, stream=True)
                if img_response.status_code == 200:
                    with open(destination, "wb") as fob:
                        for chunk in img_response:
                            fob.write(chunk)
                else:
                    # TODO: if not accessible, retry the download
                    return None
            return img_name
    return None
Example #10
def wiki(searchterm):
    """return the top wiki search result for the term"""
    searchterm = quote(searchterm)

    url = "https://en.wikipedia.org/w/api.php?action=query&list=search&srsearch={0}&format=json"
    url = url.format(searchterm)

    result = requests.get(url).json()

    pages = result["query"]["search"]

    # try to reject disambiguation pages
    pages = [p for p in pages if "may refer to" not in p["snippet"]]

    if not pages:
        return ""

    page = quote(pages[0]["title"].encode("utf8"))
    link = "http://en.wikipedia.org/wiki/{0}".format(page)

    r = requests.get("http://en.wikipedia.org/w/api.php?format=json&action=parse&page={0}".format(page)).json()
    soup = BeautifulSoup(r["parse"]["text"]["*"], "html.parser")
    p = soup.find("p").get_text()
    p = p[:8000]

    return u"{0}\n{1}".format(p, link)
Example #11
def index():
    form = PricesForm()
    geocode_url = "https://geocode-maps.yandex.ru/1.x/"
    uberapi_url = "https://api.uber.com/v1/estimates/price"
    if form.validate_on_submit():
        start = form.start_location.data
        end = form.end_location.data
        geo_param = {"format": "json", "geocode": start}
        r = requests.get(geocode_url, geo_param)
        start_point = r.json()["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]["Point"]["pos"]
        t = start_point.partition(" ")
        start_longitude = float(t[0])
        start_latitude = float(t[2])

        geo_param["geocode"] = end
        r = requests.get(geocode_url, geo_param)
        end_point = r.json()["response"]["GeoObjectCollection"]["featureMember"][0]["GeoObject"]["Point"]["pos"]
        t = end_point.partition(" ")
        end_longitude = float(t[0])
        end_latitude = float(t[2])

        uber_param = {
            "server_token": server_token,
            "start_latitude": start_latitude,
            "start_longitude": start_longitude,
            "end_latitude": end_latitude,
            "end_longitude": end_longitude,
        }
        r = requests.get(uberapi_url, uber_param)
        p = r.json()["prices"]
        distance = p[0]["distance"] * 1.60934
        flash(u"Расстояние: " + str(distance) + u" км")
        for item in p:
            flash(item["display_name"] + ": " + item["estimate"])
    return render_template("index.html", form=form)
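One detail worth noting in the calls above: the second positional argument to requests.get is params, so requests.get(geocode_url, geo_param) encodes the dict into the query string. The explicit keyword form is equivalent:

r = requests.get(geocode_url, params=geo_param)  # same request, keyword form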
Example #12
    def worker(self, system_name, result):
        try:
            r = requests.get(
                "http://www.edsm.net/api-v1/system?sysname=%s&coords=1&fromSoftware=%s&fromSoftwareVersion=%s"
                % (urllib.quote(system_name), urllib.quote(applongname), urllib.quote(appversion)),
                timeout=EDSM._TIMEOUT,
            )
            r.raise_for_status()
            data = r.json()

            if data == -1:
                # System not present - create it
                result["img"] = EDSM._IMG_NEW
                result["uncharted"] = True
                result["done"] = True  # give feedback immediately
                requests.get(
                    "http://www.edsm.net/api-v1/url?sysname=%s&fromSoftware=%s&fromSoftwareVersion=%s"
                    % (urllib.quote(system_name), urllib.quote(applongname), urllib.quote(appversion)),
                    timeout=EDSM._TIMEOUT,
                )  # creates system
            elif data.get("coords"):
                result["img"] = EDSM._IMG_KNOWN
                result["done"] = True
                self.syscache.add(system_name)
            else:
                result["img"] = EDSM._IMG_UNKNOWN
                result["uncharted"] = True
        except:
            if __debug__:
                print_exc()
            result["img"] = EDSM._IMG_ERROR
        result["done"] = True
Example #13
    def test_scan_log(self):
        profile, target_url = get_test_profile(SLOW_TEST_PROFILE)
        data = {"scan_profile": profile, "target_urls": [target_url]}
        requests.post("%s/scans/" % self.api_url, data=json.dumps(data), headers=self.headers)

        #
        # Wait until the scanner finishes and assert the vulnerabilities
        #
        self.wait_until_running()
        self.wait_until_finish(500)

        #
        # Get the scan log
        #
        response = requests.get("%s/scans/0/log" % self.api_url)
        self.assertEqual(response.status_code, 200, response.text)

        log_data_page_0 = response.json()
        self.assertEqual(len(log_data_page_0["entries"]), 200)
        self.assertEqual(log_data_page_0["more"], True)
        self.assertEqual(log_data_page_0["next"], 1)

        zero_entry = log_data_page_0["entries"][0]
        self.assertEqual(zero_entry["message"], u"Called w3afCore.start()")
        self.assertEqual(zero_entry["severity"], None)
        self.assertEqual(zero_entry["type"], "debug")
        self.assertIsNotNone(zero_entry["time"])

        response = requests.get("%s/scans/0/log?page=1" % self.api_url)
        self.assertEqual(response.status_code, 200, response.text)

        self.assertNotEqual(log_data_page_0["entries"], response.json()["entries"])
Example #14
 def Download(self):
     for url in self.urls:
         target = requests.get(url)
         if target.status_code != 200:
             print("Cannot Retreive web page.\n")
             continue
         j = json.loads(target.text)
         title = j["title"]
         title = title.replace(" ", "")
         title = re.sub(r"[\W_]+", "", title)
         image = j["img"]
         # build the image filename from the title plus the extension
         imagename = title + "." + image[-3:]
         # --check if file already exists
         if os.path.isfile(imagename):
             print("File " + imagename + " already exists.")
             continue
         # ---
         r = requests.get(image)
         if r.status_code != 200:
             print("Cannot Retreive Image\n")
             continue
         with open(imagename, "wb") as f:
             print("Downloading image -- " + title)
             for chunk in r.iter_content(1024):
                 f.write(chunk)
Example #15
def status():
    with open(PROJECTS_FILE) as file:
        projects = json.load(file)

    try:
        for project in projects:
            _, host, path, _, _, _ = urlparse(project["travis url"])
            api_url = "https://api.{host}/repos{path}".format(**locals())
            resp = get(api_url)

            # See if the Github URL has moved.
            if resp.status_code == 404:
                github_url = "https://github.com{path}".format(**locals())
                resp = get(github_url)

                if resp.status_code == 200:
                    _, _, github_path, _, _, _ = urlparse(resp.url)

                    if github_path != path:
                        message = "Error in {guid}: {path} has moved to {github_path}"
                        kwargs = dict(guid=project["guid"], **locals())
                        raise Exception(message.format(**kwargs))

            if resp.status_code != 200:
                message = "Missing {guid}: no {travis url}"
                raise Exception(message.format(**project))
    except Exception as e:
        status = str(e)
    else:
        status = "ok"

    return jsonify(dict(status=status, updated=int(time()), dependencies=["Travis", "Github"], resources={}))
Example #16
def at(nick, channel, arguments, sender, config, irc):
    try:
        r = requests.get(config.get("at", "spaceapi_url"))
        if callable(r.json):
            j = r.json()
        else:
            j = r.json

        msg = []
        msg.append(u"Hackerspace jest " + (u"otwarty" if j["state"]["open"] else u"zamknięty"))

        any_lights = False
        lights_str = []
        for room, state in j["sensors"]["ext_lights"][0].items():
            if state:
                any_lights = True
                lights_str.append(room)

        if any_lights:
            msg.append(u"Światło zaświecone w: " + ", ".join(lights_str) + ".")
        else:
            msg.append(u"Światła pogaszone.")

        helpers.msg(irc.client, channel, nick + ": " + ", ".join(msg))

    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
        helpers.msg(irc.client, channel, nick + u": Spaceapi… nie działa.")

    try:
        r = requests.get(config.get("at", "whois_url"))
        if callable(r.json):
            j = r.json()
        else:
            j = r.json
        l = len(j["users"])

        if l == 0:
            msg = u"Żywego ducha nie uświadczysz…"
        else:
            if int(l + j["total_devices_count"]) == 1:
                devnoun = u"urządzenie"
            elif (
                int(l + j["total_devices_count"]) % 10 >= 2
                and int(l + j["total_devices_count"]) % 10 <= 4
                and int(l + j["total_devices_count"]) > 14
            ):
                devnoun = u"urządzenia"
            else:
                devnoun = u"urządzeń"

            msg = u"%s %s, w tym białkowe: %s" % (l + j["total_devices_count"], devnoun, ", ".join(j["users"]))

        helpers.msg(irc.client, channel, nick + u": " + msg)

    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)
        helpers.msg(irc.client, channel, nick + u": Whois… nie działa.")
Example #17
    def test_status_raising(self):
        r = get(httpbin("status", "404"))
        self.assertRaises(HTTPError, r.raise_for_status)

        r = get(httpbin("status", "200"))
        self.assertFalse(r.error)
        r.raise_for_status()
Example #18
def _download_xbrl_file(info_dicts, p):
    no = p
    directory_path = os.getcwd() + "/xbrl_files/"
    while no < len(info_dicts):
        info_dict = info_dicts[no]
        no += proc

        # Create a directory per securities code
        company_path = directory_path + info_dict["cd"] + "/"
        ir_path = company_path + info_dict["id"]
        make_directory(company_path)

        #  Create a directory per securities code x IR item
        if os.path.exists(ir_path):
            continue
        make_directory(ir_path)
        print("Process(" + str(p + 1) + "):downloading:" + info_dict["update"] + "_" + info_dict["title"])

        url = info_dict["url"]
        r = requests.get(url)
        if r.ok:
            # fetch the file with requests and unzip it
            z = ZipFile(io.BytesIO(r.content))
            z.extractall(ir_path)  # unzip the file and save files to path.
Example #19
def Tuling(info):
    url = api_url + "?key=" + key + "&info=" + info
    s = requests.get(url).json()
    q = s["text"]
    url = youdao_url + q
    s = requests.get(url).json()
    return s["translation"][0] + "\n" + q
Example #20
 def _send_command(self, command_id):
     """Send a command to MPC-HC via its window message ID."""
     try:
         params = {"wm_command": command_id}
         requests.get("{}/command.html".format(self._url), params=params, timeout=3)
     except requests.exceptions.RequestException:
         _LOGGER.error("Could not send command %d to MPC-HC at: %s", command_id, self._url)
Example #21
def deploy():
    user = request.form["username"]
    repo = request.form["repository"]
    do_api = os.getenv("DO_API")
    do_client = os.getenv("DO_CLIENT")
    do_payload = {
        "name": "testing",
        "client_id": do_client,
        "api_key": do_api,
        "size_id": "66",
        "image_id": "3447912",
        "region_id": "4",
        "ssh_key_ids": "122283",
    }
    droplet = requests.get("https://api.digitalocean.com/droplets/new", params=do_payload).json()
    time.sleep(55)
    droplet = requests.get(
        "https://api.digitalocean.com/droplets/{0}".format(droplet["droplet"]["id"]),
        params={"client_id": do_client, "api_key": do_api},
    ).json()
    print(droplet["droplet"]["ip_address"])
    ssh.connect(droplet["droplet"]["ip_address"], username="root", allow_agent=True)
    _, stdout, stderr = ssh.exec_command(
        "apt-add-repository -y ppa:chris-lea/node.js;apt-get update;apt-get install -y git ruby python-dev python-pip nodejs build-essential; git clone https://github.com/{0}/{1}.git; cd {1}; sh build.sh".format(
            user, repo
        )
    )
    while not stdout.channel.exit_status_ready():
        print(stdout.read())
    ssh.close()
    return jsonify({"ip": droplet["droplet"]["ip_address"]})
Example #22
    def query_api(self):
        url = "https://api.airbnb.com/v2/listings/"
        url += str(self.lid)
        url += "?client_id=3092nxybyb0otqw18e8nh5nty&_format=v1_legacy_for_p3"

        r = json.loads(requests.get(url).text.encode("utf-8"))

        # API rate limit exceeded -- pause <wait_mins> minutes
        wait_mins = 10
        while "error_code" in r.keys() and r["error_code"] == 503:
            sys.stderr.write("API limit exceeded -- waiting ")
            for i in range(wait_mins):
                sys.stderr.write(str(wait_mins - i) + " more minutes... ")
                time.sleep(60)
            r = json.loads(requests.get(url).text.encode("utf-8"))
            sys.stderr.write("\n")

        # other error codes, e.g. searched too far back
        if "error_code" in r.keys():
            sys.stderr.write(str(self.lid))
            sys.stderr.write(str(r))
            sys.stderr.write("\n")
            return None

        return r
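The fixed ten-minute wait above is one way to ride out rate limiting; a more generic sketch retries with exponential backoff, keyed on HTTP status codes rather than the error_code field in the JSON body (attempt count and base delay are arbitrary choices):

import time
import requests

def get_with_backoff(url, attempts=5, base_delay=1.0):
    """GET url, sleeping base_delay * 2**n between retries on 429/503."""
    for attempt in range(attempts):
        r = requests.get(url)
        if r.status_code not in (429, 503):
            return r
        time.sleep(base_delay * (2 ** attempt))
    return r  # last response, still rate-limited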
Example #23
def get_celebrities(html_doc):
    celebs = html_doc.xpath('//div[@class="celeb"]')
    for celeb in celebs:
        link = celeb.xpath("descendant::a[2]")[0]
        name = link.text.strip()
        name = re_num.sub("", name).strip().encode("utf-8")

        print "retrieving: {name}...".format(name=name)

        url = "http://www.imdb.com/search/name?name={name}".format(name=name.replace(" ", "+"))
        celeb_search_html = requests.get(url).content
        celeb_search_doc = lxml.html.fromstring(celeb_search_html)
        target_elem = celeb_search_doc.xpath('//table[@class="results"]/*/td[@class="name"]/a')
        if not target_elem:
            continue
        target_elem = target_elem[0]
        celeb_url = urlparse.urljoin("http://www.imdb.com", target_elem.attrib["href"])

        celeb_doc = lxml.html.fromstring(requests.get(celeb_url).content)
        image_elem = celeb_doc.xpath('//td[@id="img_primary"]/a/img')
        if not image_elem:
            continue
        image_elem = image_elem[0]
        celeb_image_url = image_elem.attrib["src"]

        r = requests.get(celeb_image_url)
        if r.status_code == 200:
            with open("orig/{name}.jpg".format(name=name), "wb") as f:
                for chunk in r.iter_content():
                    f.write(chunk)
        time.sleep(1)
Example #24
def GetInstitutions(page, name=None):
    if name is None:
        return json.loads(requests.get("%s?page=%d" % (RESTFUL_INSTITUTION, page)).text)
    else:
        q_value = '{"filters":[{"name":"name","op":"like","val":"%%%s%%"}]}' % name
        q_value = urllib.quote(q_value.encode("utf-8"))
        return json.loads(requests.get("%s?page=%d&q=%s" % (RESTFUL_INSTITUTION, page, q_value)).text)
Example #25
 def test_connect_timeout(self):
     try:
         requests.get(TARPIT, timeout=(0.1, None))
         assert False, "The connect() request should time out."
     except ConnectTimeout as e:
         assert isinstance(e, ConnectionError)
         assert isinstance(e, Timeout)
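As the test relies on, timeout also accepts a (connect, read) tuple: (0.1, None) gives the TCP connect 100 ms while leaving the read phase unbounded. A short illustration (the URL is just an example):

import requests

# 3.05 s to establish the connection, 27 s for each read of the body
r = requests.get("https://httpbin.org/get", timeout=(3.05, 27))

# a single number applies to both the connect and the read phases
r = requests.get("https://httpbin.org/get", timeout=5)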
Example #26
def GetBulletins(page, title=None):
    if title is None:
        return json.loads(requests.get("%s?page=%d" % (RESTFUL_BULLETIN, page)).text)
    else:
        q_value = '{"filters":[{"name":"title","op":"like","val":"%%%s%%"}]}' % title
        q_value = urllib.quote(q_value.encode("utf-8"))
        return json.loads(requests.get("%s?page=%d&q=%s" % (RESTFUL_BULLETIN, page, q_value)).text)
Example #27
    def test_basicauth_with_netrc(self):
        auth = ("user", "pass")
        wrong_auth = ("wronguser", "wrongpass")
        url = httpbin("basic-auth", "user", "pass")

        def get_netrc_auth_mock(url):
            return auth

        requests.sessions.get_netrc_auth = get_netrc_auth_mock

        # Should use netrc and work.
        r = requests.get(url)
        assert r.status_code == 200

        # Given auth should override and fail.
        r = requests.get(url, auth=wrong_auth)
        assert r.status_code == 401

        s = requests.session()

        # Should use netrc and work.
        r = s.get(url)
        assert r.status_code == 200

        # Given auth should override and fail.
        s.auth = wrong_auth
        r = s.get(url)
        assert r.status_code == 401
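The precedence the test pins down, explicit auth beats netrc at both the request and the session level, looks like this in ordinary use (the URL is illustrative):

import requests

# with no auth argument, requests falls back to ~/.netrc credentials
r = requests.get("https://example.com/protected")

# an explicit auth tuple always overrides whatever netrc would supply
r = requests.get("https://example.com/protected", auth=("user", "pass"))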
Example #28
def notify(sender, **kwargs):
    params = {}
    params["to"] = kwargs["instance"].sender
    params["text"] = "This is the reply"
    params["username"] = "kanneluser"
    params["password"] = "df89asj89I23hvcxSDasdf3298jvkjc839"
    requests.get("http://localhost:13777/cgi-bin/sendsms", params=params)
Example #29
 def test_unicode_get(self):
     url = httpbin("/get")
     requests.get(url, params={"foo": "føø"})
     requests.get(url, params={"føø": "føø"})
     requests.get(url, params={"føø": "føø"})
     requests.get(url, params={"foo": "foo"})
     requests.get(httpbin("ø"), params={"foo": "foo"})
Example #30
def __fetch_crits_object_ids(config, src, endpoint, params):
    # TODO refactor this and merge with fetch_crits_object_ids() /
    #      split into smaller functions
    """fetch all crits object ids from endpoint and return a list"""
    url = crits_url(config, src)
    attempt_certificate_validation = config["crits"]["sites"][src]["api"]["attempt_certificate_validation"]
    if not attempt_certificate_validation:
        requests.packages.urllib3.disable_warnings()
    if config["crits"]["sites"][src]["api"]["ssl"]:
        r = requests.get(url + endpoint + "/", params=params, verify=attempt_certificate_validation)
    else:
        r = requests.get(url + endpoint + "/", params=params)
    json_output = r.json()
    object_count = int(json_output[u"meta"][u"total_count"])
    max_results = config["crits"]["sites"][src]["api"]["max_results"]
    if object_count > max_results:
        page_count = object_count // max_results
        if object_count % max_results > 0:
            page_count += 1
    else:
        page_count = 0
    object_ids = list()
    params["limit"] = max_results
    i = 0
    while i <= page_count:
        params["offset"] = i * max_results
        if config["crits"]["sites"][src]["api"]["ssl"]:
            r = requests.get(url + endpoint + "/", params=params, verify=attempt_certificate_validation)
        else:
            r = requests.get(url + endpoint + "/", params=params)
        json_output = r.json()
        for object_ in json_output.get("objects", []):
            object_ids.append(object_[u"_id"].encode("ascii", "ignore"))
        i += 1
    return object_ids
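Reduced to its essentials, the paging loop walks offset in steps of limit until total_count is exhausted; a condensed sketch using ceiling division (note it visits exactly the computed number of pages, whereas the while loop above can request one page more than needed):

import requests

def fetch_all_ids(url, params, max_results=100):
    """Collect object ids across pages, following the offset/limit scheme above."""
    params = dict(params, limit=max_results)
    r = requests.get(url, params=params)
    total = int(r.json()["meta"]["total_count"])
    pages = (total + max_results - 1) // max_results  # ceiling division

    ids = []
    for page in range(pages):
        params["offset"] = page * max_results
        r = requests.get(url, params=params)
        ids.extend(obj["_id"] for obj in r.json().get("objects", []))
    return ids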