Example #1
 def query_opennms(self, **kwargs: str) -> None:
     login, password = self.opennms_login, kwargs["password"]
     Session.commit()
     json_devices = http_get(
         self.opennms_devices,
         headers={"Accept": "application/json"},
         auth=(login, password),
     ).json()["node"]
     devices = {
         device["id"]: {
             "name": device.get("label", device["id"]),
             "description": device["assetRecord"].get("description", ""),
             "location": device["assetRecord"].get("building", ""),
             "vendor": device["assetRecord"].get("manufacturer", ""),
             "model": device["assetRecord"].get("modelNumber", ""),
             "operating_system": device.get("operatingSystem", ""),
             "os_version": device["assetRecord"].get("sysDescription", ""),
             "longitude": device["assetRecord"].get("longitude", 0.0),
             "latitude": device["assetRecord"].get("latitude", 0.0),
         }
         for device in json_devices
     }
     for device in list(devices):
         link = http_get(
             f"{self.opennms_rest_api}/nodes/{device}/ipinterfaces",
             headers={"Accept": "application/json"},
             auth=(login, password),
         ).json()
         for interface in link["ipInterface"]:
             if interface["snmpPrimary"] == "P":
                 devices[device]["ip_address"] = interface["ipAddress"]
                 factory("Device", **devices[device])
def fill_in_username(request):
    """
    Password recovery step 1: fill in the account name.
    :param request:
    :return:
    """
    ret_data = {}
    ret_k = None
    try:
        args = request.POST
        session = request.session
        username = args["username"].strip()
        auth_code = args.get("captcha_code", "")
        cookies = session.get("req_cookie")
        other_name = session.get("request_data", {}).get("other_name")
        other_value = session.get("request_data", {}).get("other_value")
        uuid = session.get("request_data", {}).get("uuid")
        post_url = "{host}/findPwd/doIndex.action?&uuid={uuid}&sourceId={sourceId}&" \
                   "authCode={authCode}&username={username}&eid={eid}&fp={fp}&" \
                   "{o_name}={o_value}".format(host=host, uuid=uuid, sourceId=sourceId,
                                               authCode=auth_code, username=username, eid=eid,
                                               fp=fp, o_name=other_name, o_value=other_value)
        ret_json = http_get(post_url, headers=HEADERS, cookies=cookies, verify=False).json()
        result_code = ret_json.get("resultCode")
        if result_code == "ok":
            msg = "第一步成功"
            ret_k = ret_json.get("k")
        elif result_code == "authCodeFailure":
            msg = "验证码错误"
        elif result_code == "none":
            msg = "您输入的账户名不存在,请核对后重新输入"
        elif result_code == "usernameFailure":
            msg = "请输入用户名"
        else:
            msg = "网络连接超时,请重新修改登录密码"

        if ret_k:
            try:
                url = "https://safe.jd.com/findPwd/findPwd.action?k={ret_k}".format(ret_k=ret_k)
                page = http_get(url, headers=HEADERS, verify=False, timeout=timeout).text
                element = html.fromstring(page)
                nickname = element.xpath('//div[@id="mobileDiv"]/div[1]/div[1]/strong/text()')[0]
            except Exception:
                msg = "您的账户信息异常,暂时限制找回密码!"
                add_ajax_error_json(ret_data, msg)
            else:
                ret_data["ret_k"] = ret_k
                add_ajax_ok_json(ret_data)
        else:
            add_ajax_error_json(ret_data, msg)
    except Exception:
        add_ajax_error_json(ret_data, "找回密码第一步:填写账户名失败")

    return JsonResponse(ret_data)
Example #3
    def get_leadearboard(self):
        """
        Gets all the hackers from the leaderboard of a certain contest.
        """

        # This list will store all the hackers obtained on the leaderboard.
        all_hackers: List[Dict[str, Union[int, str]]] = []

        # Begin with an offset of 0; each iteration advances the offset by
        # the number of hackers fetched so far.
        offset = 0

        print(self.render_leaderboard_link(offset, LEADERBOARD_LIMIT))
        # Make initial request
        response = http_get(self.render_leaderboard_link(
            offset, LEADERBOARD_LIMIT),
                            headers=self.headers,
                            cookies=self.cookies)
        # Make a json from it
        json_response = load_json_str(response.text)

        # Get the total number of hackers
        total = json_response["total"]

        while len(all_hackers) < total:

            # Get the new hackers from the response
            new_hackers = self.parse_new_hackers(response)

            # Add the new hackers to the complete list
            all_hackers.extend(new_hackers)

            # The offset will be all the hackers so far
            offset = len(all_hackers)

            # Request a new set of hackers
            response = http_get(self.render_leaderboard_link(
                offset, LEADERBOARD_LIMIT),
                                headers=self.headers,
                                cookies=self.cookies)

        # Filter hackers
        if self.username_filter:
            all_hackers = list(
                filter(lambda x: x["hacker"] in self.username_filter,
                       all_hackers))
        # Return the complete list of hackers
        return self.filter_on_time(all_hackers)
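render_leaderboard_link is referenced above but not shown in the excerpt; judging by its use it builds the paginated leaderboard URL from an offset and a limit. A hypothetical sketch (the path and query parameter names are assumptions, not taken from the original):

    def render_leaderboard_link(self, offset, limit):
        # Hypothetical: contest_link appears as an attribute in a later example;
        # the exact URL shape here is an assumption.
        return f"{self.contest_link}/leaderboard?offset={offset}&limit={limit}"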
Example #4
    def __get(self):
        # Article identifiers can contain slashes, which must not be interpreted
        # as directory delimiters in file system operations.
        # https://de.wikipedia.org/wiki/Bob_Marley/Auszeichnungen_f%C3%BCr_Musikverk%C3%A4ufe
        safe_identifier = self.identifier.replace('/', '_')

        if self.cache_directory is None:
            cache_file = None
        else:
            cache_file = path.join(self.cache_directory,
                                   safe_identifier + '.html')

        text = ''

        # load cached text if file exists
        if cache_file is not None and path.exists(cache_file):
            logging.info(self.__str__() + ' (from cache)')
            with open(cache_file, 'r', encoding='utf-8') as file:
                text = file.read()

        # If text variable is empty either the cache file did not exist or it
        # did not contain any data for whatever reason.
        # In this case we load the data via http and save it to the cache file.
        if text.strip() == '':
            logging.info(self.__str__())
            text = http_get(self.__str__()).text

            if cache_file is not None:
                with open(cache_file, 'w', encoding='utf-8') as file:
                    file.write(text)

        return text
    def _grab_auth_token(self,
                         repository: str,
                         repo_scope: str = 'pull') -> str:
        cache_ident = repository + "_" + repo_scope

        if cache_ident in self._token_cache:
            return self._token_cache[cache_ident]

        scope = 'repository:' + repository + ':%s' % repo_scope
        service = urlsplit(self._host).netloc
        url = self._auth_url + '?scope=%s&service=%s' % (scope, service)

        response = http_get(url=url,
                            auth=HTTPBasicAuth(self._username, self._password))
        json = response.json()

        if "token" not in json:
            raise RegistryException(
                'Cannot get authentication token, maybe invalid user/password?',
                response.text)

        token = response.json()['token']
        self._token_cache[cache_ident] = token

        return token
Example #6
def loadSiteZIP(site, oldSite, force):
    outDir = path.join(SITEDIR, site['name'])
    site['dir'] = outDir

    if oldSite and not force and site['src'] == oldSite['src']:
        return

    newDir = '%s___new' % outDir
    oldDir = '%s___old' % outDir

    rmtree(newDir, ignore_errors=True)
    rmtree(oldDir, ignore_errors=True)
    rmtree(newDir, ignore_errors=True)

    mkdir(newDir)

    r = http_get(site['src'], stream=True)
    z = ZipFile(BytesIO(r.content))
    z.extractall(newDir)

    try:
        rename(outDir, oldDir)
    except FileNotFoundError:
        pass

    rename(newDir, outDir)
    rmtree(oldDir, ignore_errors=True)
Example #7
    def _get(self, url):
        with Cache(self.config_path) as cache:
            if not self.cache or url not in cache:
                self.bucket.consume()
                logger.trace(url)
                try:
                    r = http_get(url,
                                 headers={
                                     'User-Agent':
                                     'illallangi-redactedapi/0.0.1',
                                     'Authorization': f'{self.api_key}'
                                 })
                    r.raise_for_status()
                except HTTPError as http_err:
                    logger.error(f'HTTP error occurred: {http_err}')
                    cache.set(url, None, expire=self.failure_expiry)
                    return
                except Exception as err:
                    logger.error(f'Other error occurred: {err}')
                    cache.set(url, None, expire=self.failure_expiry)
                    return
                logger.debug('Received {0} bytes from API'.format(
                    len(r.content)))

                logger.trace(r.request.url)
                logger.trace(r.request.headers)
                logger.trace(r.headers)
                logger.trace(r.text)
                cache.set(url,
                          r.json()['response'],
                          expire=self.success_expiry)
            return cache[url]
def get_img_captcha(request):
    """
    Get the login image captcha.
    :param request:
    :return:
    """
    ret_data = {}
    args = request.POST
    username = args["username"].strip()
    account_type = args["account_type"]
    if not username:
        add_ajax_error_json(ret_data, "用户名为空")
        return JsonResponse(ret_data)

    key = username + ACCOUNT_CRAWLING_IMG_HEADERS_SSDB_SUFFIX + account_type
    try:
        ssdb_conn = get_ssdb_conn()
        headers_data = ssdb_conn.get(key)
        if headers_data is not None:
            headers_data_dic = json_loads(headers_data)
            tmp_headers = headers_data_dic.get("headers")
            uuid = headers_data_dic.get("uuid")
            captcha_url = "https://authcode.jd.com/verify/image?a=1&acid={uuid}&" \
                          "yys={stime}".format(uuid=uuid, stime=get_js_time())
            img_content = http_get(captcha_url, headers=tmp_headers, verify=False).content
            ret_data["img_data"] = bytes.decode(b64encode(img_content))
        else:
            add_ajax_error_json(ret_data, "无法获取验证码")
    except Exception:
        add_ajax_error_json(ret_data, "无法获取验证码")
    else:
        add_ajax_ok_json(ret_data)

    return JsonResponse(ret_data)
Example #9
def upload_photo():
    """上传照片到七牛,并返回私有链接地址"""
    from qiniu import Auth
    from qiniu import put_file

    global config
    progress_handler = lambda progress, total: progress

    photo_path = http_get("http://127.0.0.1:9876/photo/shot").content
    # Upload to qiniu
    mime_type = "image/jpeg"
    auth = Auth(str(config["qiniu"]["api_key"]), str(config["qiniu"]["secret"]))
    print auth
    filename = os.path.basename(photo_path)
    print "filename: ", filename, type(filename)
    token = auth.upload_token(str(config["qiniu"]["bucket"]))
    print token
    ret, info = put_file(token, filename, photo_path, {}, mime_type, progress_handler=progress_handler)
    print "uploaded: ", ret, info
    try:
        os.remove(photo_path)
    except Exception:
        pass

    # Return URL
    base_url = "{}/{}".format(str(config["qiniu"]["domain"]), filename)
    return auth.private_download_url(base_url, expires=3600)
def get_img_captcha(request):
    """获取图片验证码"""
    ret_data = {}
    args = request.POST
    username = args["username"].strip()
    account_type = args["account_type"]
    url = "http://shop.10086.cn/i/authImg?t=" + str(rand_0_1())

    if not username:
        add_ajax_error_json(ret_data, "用户名为空")
        return JsonResponse(ret_data)

    key = username + ACCOUNT_CRAWLING_IMG_HEADERS_SSDB_SUFFIX + account_type
    try:
        ssdb_conn = get_ssdb_conn()
        headers = ssdb_conn.get(key)
        if headers is not None:
            img_content = http_get(url, headers=eval(headers)).content
            ret_data["img_data"] = bytes.decode(b64encode(img_content))
        else:
            add_ajax_error_json(ret_data, "无法获取图片验证码")
    except Exception:
        add_ajax_error_json(ret_data, "无法获取图片验证码")
    else:
        add_ajax_ok_json(ret_data)

    return JsonResponse(ret_data)
Example #11
    def _get_a_proxy(self):
        req_header = {
            'User-Agent':
            "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0",
            'Accept':
            'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh',
            'Connection': 'close',
        }
        check_url = "http://shop.10086.cn/i/v1/fee/real/15928016431?_=" + get_js_time(
        )

        for proxy in self.proxy_api.get_proxy_all():
            if proxy in self.bad_proxy:
                continue

            try:
                resp = http_get(check_url,
                                headers=req_header,
                                timeout=3,
                                proxies={'https': 'https://' + proxy})
                if b'"retCode":"' in resp.content:
                    resp = get_web_html_by_proxy(
                        "https://login.10086.cn/captchazh.htm?type=12",
                        proxies={'https': 'https://' + proxy})
                    if resp is not None:
                        return proxy
            except Exception:
                self.logger.error("proxy error")

            self.bad_proxy.add(proxy)
        else:
            return rand_choice(list(self.good_proxy))
def _get_captcha_body(request):
    cookies = request.session["req_cookie"]
    headers = DEFAULT_REQUEST_HEADERS.copy()
    headers['User-Agent'] = USER_AGENT
    headers["Referer"] = "https://ipcrs.pbccrc.org.cn/page/login/loginreg.jsp"
    return http_get("https://ipcrs.pbccrc.org.cn/imgrc.do?a=" + get_js_time(),
                    headers=headers, cookies=cookies.get_dict(), verify=False).content
Example #13
    def api_get(self, url, passkey=False, apikey=False, **kwargs):
        """ Получить страницу API.

        :param url: адрес получаемой страницы, полностью или p/a/th?param=1
        :type url: :py:class:`str`
        :param passkey: использовать для запроса passkey
        :type passkey: :py:class:`bool`
        :param apikey: использовать для запроса apikey
        :type apikey: :py:class:`bool`
        :param kwargs: остальные параметры отправляемые в requests.get
        :type kwargs: :py:class:`dict`
        :return: ответ на запрос
        :raises RuntimeError: вызывается в если в ответе приходит блок error
            или если запрос возвращает некорректный http-статус
        """
        full_url = url.startswith('http://')
        url = url if full_url else self.base_api_url + url.lstrip('/')
        params = kwargs.pop('params', {})
        params['format'] = kwargs.pop('format', 'json')
        if passkey:
            params['passkey'] = self.passkey
        if apikey:
            params['apikey'] = self.apikey
        req = http_get(url, params={} if full_url else params, **kwargs)
        if req.status_code != codes.ok:
            msg = 'cinemate return invalid status code {code} for {url}'
            raise RuntimeError(msg.format(code=req.status_code, url=req.url))
        error = req.json().get('error')
        if error:
            raise RuntimeError(error)
        return req
Example #14
def get_gif_url_tenor(params):
    api_key = get_tenor_api_key()
    if not api_key:
        logger.critical("NO API KEY FOR Tenor!")
        exit(-1)

    base_url = "https://api.tenor.com/v1/search?"
    search_params = "+".join(params)
    idx = randint(1, 50)
    url_params = {
        "q": search_params,
        "pos": idx,
        "limit": 1,
        "key": api_key,
        "contentfilter": "off",
        "media_filter": "basic",
        "ar_range": "all",
    }
    logger.debug("Tenor - GET: {}".format(url_params))
    r = http_get(base_url, params=url_params)
    if r.status_code != 200:
        logger.error("Could not get tenor content!")
        logger.error("{} - {}".format(r.status_code, r.text))
        r.raise_for_status()
    gif_url = r.json()["results"][0]["url"]
    return gif_url
def get_img_captcha_find_password(request):
    """
    Get the image captcha for password recovery.
    :param request:
    :return:
    """
    ret_data = {}
    try:
        uuid = request.session.get("request_data", {}).get("uuid")
        code_url = "https://authcode.jd.com/verify/image?acid=%s&srcid=%s&_t=%s" \
                   % (uuid, sourceId, get_js_time())
        captcha_headers = HEADERS.copy()
        captcha_headers.update({
            "Host": "authcode.jd.com",
            "Referer": "https://safe.jd.com/findPwd/index.action",
            "Accept": "image/webp,image/*,*/*;q=0.8"
        })
        code_content = http_get(code_url, headers=captcha_headers, verify=False).content
        ret_data["img_data"] = bytes.decode(b64encode(code_content))
    except Exception:
        add_ajax_error_json(ret_data, "无法获取验证码")
    else:
        add_ajax_ok_json(ret_data)

    return JsonResponse(ret_data)
    def check_dependencies(visited: str, dependencies: Iterable[str],
                           executor: _base.Executor) -> bool:
        visited_set = set(visited.split(','))
        filtered_dep = filter(
            lambda x: PORT_APP[int(x.split(':')[1])] not in visited_set,
            dependencies)

        # futures = []
        # for target in filtered_dep:
        #     get_url = f'http://{target}/?from={visited}'
        #     future = executor.submit(lambda: http_get(get_url))
        #     futures.append((get_url, future))
        #
        # for (get_url, future) in futures:
        #     result = future.result()
        #     is_ok = result.status_code == 200
        #     if not is_ok:
        #         return False

        for target in filtered_dep:
            get_url = f'http://{target}/?from={visited}'
            response = http_get(get_url)
            is_ok = response.status_code == 200
            if not is_ok:
                return False

        return True
Example #17
	def _ping_server(self):
		try:
			response = http_get(self._server_ping_address, timeout=self.timeout)
			assert response.status_code == 200 and response.text == 'pong'
		except Exception as ex:
			logger.critical("Pinging the server failed! Shipping will probably fail!")
			logger.critical("Exception msg: [%s]" % str(ex))
def send_sms_code_find_password(request):
    """
    Password recovery: send the SMS verification code.
    :param request:
    :return:
    """
    ret_data = {}
    succ = False
    try:
        ret_k = request.POST.get("ret_k", "")
        post_url = "{host}/findPwd/getCode.action?k={ret_k}".format(host=host, ret_k=ret_k)
        ret_page = http_get(post_url, headers=HEADERS).json()
        if ret_page == 0:
            msg = "短信发送成功"
            succ = True
        elif ret_page == "kError":
            msg = "参数错误"
        elif ret_page == 503:
            msg = "120秒内仅能获取一次验证码,请稍后重试"
        elif ret_page == 504:
            msg = "您申请获取短信验证码的次数过多,请于24小时后重试"
        elif ret_page == "lock":
            msg = "您的账户信息异常,暂时限制找回密码"
        elif isinstance(ret_page, dict) and ret_page.get("resultMessage"):
            msg = ret_page["resultMessage"]
        else:
            msg = "发送短信验证码失败,未知错误"
        if succ:
            add_ajax_ok_json(ret_data)
        else:
            add_ajax_error_json(ret_data, msg)
    except Exception:
        add_ajax_error_json(ret_data, "找回密码发送短信验证码失败")

    return JsonResponse(ret_data)
Example #19
def get_sohu_img_captcha(request):
    """获取图片验证码"""
    ret_data = {}
    args = request.POST
    username = args["username"].strip()
    account_type = args["account_type"]

    if not username:
        add_ajax_error_json(ret_data, "用户名为空")
        return JsonResponse(ret_data)

    header_key = username + ACCOUNT_CRAWLING_IMG_HEADERS_SSDB_SUFFIX + account_type
    url_key = username + ACCOUNT_CRAWLING_IMG_URL_SSDB_SUFFIX + account_type
    try:
        ssdb_conn = get_ssdb_conn()
        cookies_dict = ssdb_conn.get(header_key)
        if cookies_dict:
            cookies_dict = eval(cookies_dict)
            captcha_url = ssdb_conn.get(url_key)
            img_content = http_get(captcha_url, cookies=cookies_dict).content
            ret_data["img_data"] = bytes.decode(b64encode(img_content))
        else:
            add_ajax_error_json(ret_data, "无法获取图片验证码")
    except Exception:
        add_ajax_error_json(ret_data, "无法获取图片验证码")
    else:
        add_ajax_ok_json(ret_data)

    return JsonResponse(ret_data)
Example #20
    def download(self):
        response = http_get(self._download_url)
        response.raise_for_status()

        resource_id, resource_container = self._validate(response)

        return resource_id, resource_container
Example #21
    def getKvAreas(self, area_type, parent_area_id=False):
        area_types = {
            'county': 'counties',
            'parish': 'parishes',
            'city': 'cities',
        }

        if area_type not in list(area_types.keys()):
            exit('invalid param area_type')

        if parent_area_id:
            parent_area_list_index = list(
                area_types.keys()).index(area_type) - 1
            parent_area_type = list(area_types.keys())[parent_area_list_index]

        url = f'http://api.kv.ee/api/{area_types[area_type]}'
        params = {
            'pagination': False,
        }

        if parent_area_id:
            params[f'{parent_area_type}_id'] = parent_area_id

        response = http_get(url, params=params)

        return response.json()
def _get_mobile_bills_sms_captcha(args):
    """移动发送账单短信验证码"""
    ret_data = {}
    username = args["username"].strip()
    form_data = {"callback": "jQuery1830" + str(randint(1E16, 1E17 - 1)) + "_" + get_js_time(),
                 "_": get_js_time(),
                 }
    url = "https://shop.10086.cn/i/v1/fee/detbillrandomcodejsonp/" + username + "?" + urlencode(form_data)

    key = username + ACCOUNT_CRAWLING_SMS_HEADERS_SSDB_SUFFIX + args["account_type"]
    try:
        ssdb_conn = get_ssdb_conn()
        headers = ssdb_conn.get(key)
        if headers is not None:
            sms_content = http_get(url, headers=eval(headers), verify=False).content.decode()
            if '"retCode":"000000"' in sms_content:  # 成功
                add_ajax_ok_json(ret_data)
            elif '"retCode":"570007"' in sms_content:  # 系统繁忙!
                add_ajax_error_json(ret_data, "系统繁忙,请重试。")
            else:
                add_ajax_error_json(ret_data, sms_content)
        else:
            add_ajax_error_json(ret_data, "无法获取短信验证码,请刷新页面重试!")
    except Exception:
        add_ajax_error_json(ret_data, "获取短信验证码失败,请重试。")

    return JsonResponse(ret_data)
Example #23
 def get_json(self, basecurrency):
     url = '{base}{question}{query}'.format(base=exchangesrates_url,
                                            question='?',
                                            query=urlencode(
                                                {'base': basecurrency}))
     self._logger.debug('ExchangeRates API requested {url}'.format(url=url))
     return http_get(url).json()
def _get_unicom_bills_sms_captcha(args):
    """联通发送一般短信验证码"""
    ret_data = {}
    username = args["username"].strip()
    the_time = get_js_time()

    form_data = {'mobile': username,
                 'req_time': the_time,
                 '_': int(the_time) + 1,
                 'callback': "jQuery1720" + str(randint(1E16, 1E17 - 1)) + "_" + the_time
                 }
    # url = "https://uac.10010.com/portal/Service/SendMSG?" + urlencode(form_data)
    url = "https://uac.10010.com/portal/Service/SendMSG"
    key = username + ACCOUNT_CRAWLING_SMS_HEADERS_SSDB_SUFFIX + args["account_type"]

    try:
        ssdb_conn = get_ssdb_conn()
        headers = ssdb_conn.get(key)
        if headers is not None:
            sms_content = http_get(url, headers=eval(headers), params=form_data, verify=False).text
            if 'resultCode:"0000"' in sms_content:
                add_ajax_ok_json(ret_data)
            elif 'resultCode:"7096"' in sms_content:  # 验证码请求过快
                add_ajax_error_json(ret_data, "验证码请求过快,请稍后再试。")
            elif 'resultCode:"7098"' in sms_content:  # 7098谁请求达到上限
                add_ajax_error_json(ret_data, "请求短信验证码达到上限,请明天再试!")
            else:
                add_ajax_error_json(ret_data, "发送失败:" + sms_content)
        else:
            add_ajax_error_json(ret_data, "无法获取短信验证码,请刷新页面重试!")
    except Exception:
        add_ajax_error_json(ret_data, "无法获取短信验证码,请重试。")

    return JsonResponse(ret_data)
Example #25
    def getListingData(self):
        # warning: the kv.ee listing HTML isn't the cleanest,
        # so extracting the data is quite messy
        data = {}

        request = http_get(self.link)
        soup = BeautifulSoup(request.text, 'html.parser')

        price_info = soup.find('div', 'object-price')
        price = price_info.findChild('strong').text.strip()
        price = re.search(r'(\d+)', price)
        if price:
            data.update({'price': int(price.group())})

        main_info_grid = soup.find_all('table', 'object-data-meta')[-1]
        for row in main_info_grid.findChildren('tr'):
            key = row.findChild('th')
            val = row.findChild('td')
            if key and val:
                key = key.text.lower().strip()
                if key in self.switcher:
                    new = self.switcher[key](val.text.strip())
                    data.update(new)

        coordinates = soup.find('a',
                                'icon icon-new-tab gtm-object-map')['href']
        coordinates = re.findall(r'(\d{2}.\d{7})', coordinates)
        data.update({'coordinates': coordinates})

        return data

    def __repr__(self):
        return str(self.data.price)
Example #26
def download_files(choices):
    dir_name = slugify(choices['title'])
    os.mkdir(dir_name)

    for song in choices['songs'][:]:
        response = http_get(song['url'], allow_redirects=True, stream=True)

        extension = guess_extension(response.headers['content-type'])
        print(f"Extension: {extension}")
        if extension is None:
            print("Cannot guess filename extension")
            choices["songs"].remove(song)
            continue

        filename_no_ext = slugify(song['title'])
        print(f"Filename: {filename_no_ext}")
        filename = filename_no_ext + extension
        print(f"Filename: {filename}")

        file_info = {'ext': extension, 'fname': filename, 'fname_no_ext': filename_no_ext}
        song['file_info'] = file_info

        total_size_in_bytes = int(response.headers.get('content-length', 0))
        block_size = 1024
        progress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True, desc=filename)
        with open(os.path.join(dir_name, filename), 'wb') as file:
            for data in response.iter_content(block_size):
                progress_bar.update(len(data))
                file.write(data)
        progress_bar.close()
Example #27
 def fetch_from_server(self, url, params=None, ok_codes=None):
     response = http_get(self.server_url + url, data=params, config=self.req_config)
     if ok_codes is None:
         ok_codes = [200, ]
     if response.status_code not in ok_codes:
         logging.error('Failure: %s%s return %s' % (self.server_url, url, response.status_code))
         return "ERROR: %s: %s" % (response.status_code, response.text)
     return response.text
Example #28
def get_word_list(url, expected_hash, minimum_word_length=4):
    text = http_get(url).text
    # hashlib expects bytes, so encode the response text before hashing.
    hashed = sha256(text.encode("utf-8")).hexdigest()

    assert hashed == expected_hash, \
        "Unexpected hash: " + hashed + "\nResponse Text Truncated:\n" + text[:100]

    return [x for x in text.split("\n") if len(x) >= minimum_word_length]
Example #29
 def epoch_contest_start_time(self):
     if not self._epoch_start_time:
         response = http_get(self.contest_link,
                             headers=self.headers,
                             cookies=self.cookies)
         json_response = load_json_str(response.text)
         self._epoch_start_time = json_response["model"]["epoch_starttime"]
     return self._epoch_start_time
Example #30
def get_web_html_by_requests(url, proxies):
    start = time()
    resp = http_get(url,
                    headers=REQ_HEADER,
                    proxies=proxies,
                    timeout=ASK_TIMEOUT)
    response_time = time() - start
    resp.close()
    return response_time if 200 == resp.status_code else None
Example #31
    def _deco(*args, **kwargs):
        # The first argument must be the request
        url = "https://account.chsi.com.cn/account/password!retrive.action"
        response = http_get(url, headers=HEADERS, verify=False)
        session = args[0].session
        session.set_expiry(0)
        session["req_cookie"] = dict(response.cookies)

        return func(*args, **kwargs)
def download_patch(config, time_f, time_t):
    filename = config['host']['delta']['template'] % (time_f, time_t)
    filepath = os.path.join(config['path']['temp'], filename)
    url = config['host']['delta']['url']

    # download compressed patch
    response = http_get(os.path.join(url, filename), stream=True)

    with open(filepath, 'wb') as f:
        for chunk in response.iter_content(65536):
            f.write(chunk)

    # check md5
    response = http_get(os.path.join(url, filename + '.md5sum')).text
    md5sum = response[:response.index(' ')]

    if md5sum != file_md5(filepath):
        raise PatchDownloadException('invalid hash')
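download_patch only accepts the file if its MD5 matches the published .md5sum. The file_md5 helper is not part of the excerpt; a minimal sketch of what it presumably does (chunked MD5 over the file, returning the hex digest):

from hashlib import md5

def file_md5(filepath, chunk_size=65536):
    # Hypothetical helper (not from the original): hash the file in chunks
    # and return the hex digest for comparison with the .md5sum contents.
    digest = md5()
    with open(filepath, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()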
Example #33
def http_request(url, method="GET", is_img=False, get_cookies=False, cookies=None,
                 headers=None, timeout=10, referer=None, data=None, charset=None):
    """
    Perform an HTTP request.
    :param url:
    :param method:
    :param is_img:
    :param get_cookies:
    :param cookies:
    :param headers:
    :param timeout:
    :param referer:
    :param data:
    :param charset:
    :return:
    """
    if not url or not isinstance(url, str):
        raise TypeError
    if referer is None:
        referer = "https://ipcrs.pbccrc.org.cn/"
    if headers is None:
        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.8",
            "Host": "ipcrs.pbccrc.org.cn",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                          "Chrome/60.0.3112.113 Safari/537.36",
            "Referer": referer,
        }
    if method == "GET":
        response = http_get(url, cookies=cookies, timeout=timeout, headers=headers, verify=False)
    else:
        response = http_post(url, cookies=cookies, timeout=timeout, headers=headers, data=data, verify=False)
    if response.status_code == 200:
        if is_img:
            content = response.content
        else:
            content = response.content
            if response.text.find("charset=gbk") >= 0:
                content = content.decode("gbk")
            elif response.text.find("charset=utf-8") >= 0:
                content = content.decode("utf-8")
            else:
                if charset is not None:
                    content = content.decode(charset)
                else:
                    content = response.text
        print("下载网页成功: %s" % url)
        if get_cookies:
            cookies = response.cookies
            return [content, cookies]
        return content
    else:
        print("请求失败:%d" % response.status_code)
        return None
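When get_cookies is true the helper returns a [content, cookies] pair instead of just the content, and it returns None on any non-200 status. A hypothetical call (the query value is illustrative; the URL mirrors the one used in the captcha example above):

img_bytes, cookies = http_request("https://ipcrs.pbccrc.org.cn/imgrc.do?a=123",
                                  is_img=True, get_cookies=True)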
Example #34
 def sliceboard(self, sliceboard_id):
     """
     Get a sliceboard object
     """
     self.sliceboard_id = sliceboard_id
     response = http_get(self.sliceboard_detail_uri + self.auth_params,
                         stream=False)
     self.sliceboard_obj = response.json()
     self.logger.info(str(self.sliceboard_obj['title']))
     assert response.status_code == 200
     return self
Example #35
def get_data(url):
    ''' Retrieve JSON data from a url.
    '''
    logging.debug('Loading %s' % url)
    
    resp = http_get(url, headers={'User-Agent': 'Python'}, auth=http_auth)
    
    if resp.status_code not in range(200, 299):
        return None
    
    return resp.json()
Example #36
    def duplicate(self):
        """
        Duplicate the sliceboard; the duplicated sliceboard object is stored on the instance.
        """
        if self.sliceboard_obj is None:
            raise Exception("Need a sliceboard")
        duplicate_url = "{0.sliceboard_detail_uri}/duplicate{0.auth_params}".format(
            self)
        response = http_get(duplicate_url, stream=False)
        self.duplicate_obj = response.json()

        return self
def get_deltas(config, time_c):
    response = http_get(config['host']['delta']['url'])
    soup = bs(response.text, 'html.parser')

    for l in soup.findAll('a'):
        match = config['host']['delta']['regex'].search(l.text)
        if not match:
            continue
        time_f = int(match.group('time_f'))

        # accept if the `from` time is equal to or later than ours
        if time_f >= time_c:
            yield (time_f, int(match.group('time_t')))
Example #38
def read_env(which):
    try:
        data = http_get("http://127.0.0.1:9876/sensors/env").json()
    except Exception:
        print(extract_traceback())
        text = u"读取传感器数据时出错,可能 pitools 服务未运行"
    else:
        if which in ("temp", "temperature"):
            text = u"温度:{:.2f}摄氏度".format(data["temperature"])
        elif which == "pressure":
            text = u"大气压:{:.3f}千帕".format(data["pressure"] / 1000.0)
        elif which == "env":
            text = u"温度:{:.2f}摄氏度,大气压{:.3f}千帕".format(data["temperature"], data["pressure"] / 1000.0)
    finally:
        return text
Example #39
 def get_context_data(self, *args, **kwargs):
     context = super(JobView, self).get_context_data(*args, **kwargs)
     response = http_get(SEARCH_URL)
     soup = BeautifulSoup(response.text)
     jobs_info = soup.find_all("div", attrs={"class":"detail"})
     jobs = []
     for job_info in jobs_info:
         jobs.append(
             dict(
                 title=job_info.a.text,
                 link=job_info.a["href"]
             )
         )
     context.update(jobs=jobs)
     return context
Example #40
 def _load_dom(self):
     dom = BeautifulSoup(http_get(self.url).text)
     self.req_time = datetime.now()
     return dom
Example #41
 def show_sliceboards(self, full=False):
     response = http_get(self.sliceboard_list_uri + self.auth_params,
                         stream=False)
     for sb in response.json()["objects"]:
         self._show_sliceboard(sb, full)
     return self
Example #42
def fetch_peps():

    tmp_file = NamedTemporaryFile(delete=False)
    tmp_dir = mkdtemp()

    # Get the remote file and save it. I had some issues loading an in memory
    # zip file for some reason...

    try:

        environ['AWS_ACCESS_KEY_ID'], environ['AWS_SECRET_ACCESS_KEY']

        with OldSmallLucidInstance(terminate=True):

            sudo('apt-get -y -q install mercurial zip')
            run('hg clone http://hg.python.org/peps ~/peps/')
            put(join(dirname(abspath(__file__)), 'hg_config'), '~/.hgrc')
            with cd('~/peps/'):
                # So, Mercurial is annoying me. Half of the time after doing
                # a clean checkout it's showing that there are file changes.
                # However, a diff shows nothing - I think it's a file
                # permission thing... but anyway, I don't care what it is -
                # so doing a commit fixes it.
                run('hg commit -m "Hackety Hackety Hack!"')
                run('hg update --clean')
                run('hg kwexpand')
            run('zip -q -r ~/peps.zip ./peps/')
            get('~/peps.zip', tmp_file)
            pep_base = join(tmp_dir, 'peps')

    except KeyError:
        print('*' * 80)
        print("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environ vars need to be set.")
        print("DEFAULTING TO THE non-mercurial pull method (Revisions and dates will be missing)")
        print('*' * 80)
        f = http_get(zip_url)
        tmp_file.write(f.content)
        pep_base = join(tmp_dir, 'peps-*')

    # Extract the tmp file to a tmp directory.
    z = ZipFile(tmp_file)
    # We trust this zip file; otherwise we shouldn't use extractall.
    z.extractall(tmp_dir)

    results = ((number,) + pep_file_to_metadata(filename)
        for number, filename in pep_numbers(pep_base))

    for number, path, raw, contents, properties in results:

        print(number)

        contents = contents.replace("http://www.python.org/dev/peps/pep-", "http://www.peps.io/")
        title = properties.pop('title')
        patterns = ["%Y-%m-%d %H:%M:%S", "%Y/%m/%d %H:%M:%S"]
        # default so the later `if dt:` check does not hit an unbound name
        dt = None

        if properties.get('last-modified'):
            for pattern in patterns:
                try:
                    dt = datetime.strptime(properties.get('last-modified'),  pattern)
                    break
                except ValueError:
                    dt = None

        filename = path.rsplit("/")[-1]

        pep, created = get_or_create(Pep, commit=False, number=number, title=title, defaults={
            'properties': properties,
            'filename': filename,
            'content': contents,
            'raw_content': raw,
        })

        if not created:
            pep.properties = properties
            pep.filename = filename
            pep.content = contents
            pep.raw_content = raw
            if dt:
                pep.updated = dt
            db.session.add(pep)

    db.session.commit()
Example #43
def get_blockchain(addr):
	url = "https://blockchain.info/rawaddr/"
	req = http_get(url + addr)
	return json.loads(req.content)
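A hypothetical call, using the well-known genesis-block coinbase address; n_tx and final_balance are among the fields blockchain.info returns for rawaddr queries:

info = get_blockchain("1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa")
print(info["n_tx"], info["final_balance"])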
Example #44
    def download(self):
        response = http_get(self._download_url)
        response.raise_for_status()
        resource_id = response.json()['resource_id']

        return resource_id