Example 1
import logging
import os
import threading

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from .utils import filter_search_string

import gi

gi.require_version('AppStreamGlib', '1.0')

from gi.repository import AppStreamGlib

# It is on the roof.
from .pool import ThreadPool

local = threading.local()
local.http = requests.session()
local.retries = Retry(connect=20, total=30, backoff_factor=2)
local.http.mount('https://', HTTPAdapter(max_retries=local.retries))
log = logging.getLogger()

# how many times to retry a downed server
MAX_RETRY = 10


def download_file(url, dest):
    dirname = os.path.dirname(dest)
    if not os.path.isdir(dirname):
        log.info("Creating directory %s" % dirname)
        os.makedirs(dirname)

    log.info("Downloading %s to %s" % (url, dest))
    r = local.http.get(url, stream=True)
    if not r.ok:
Example 2
# PREPARE file to upload
with open(mp3file, 'rb') as file_to_go:
    files = {'mp3': file_to_go}
    if imgfile is not None:
        files['picture'] = imgfile
    upload_url = 'https://api.mixcloud.com/upload/'
    # pprint("GONNA UPLOAD PAYLOAD: %s" % payload)
    print("GONNA UPLOAD FILE: %s" % mp3file)
    s = requests.Session()
    retries = Retry(total=5,
                    backoff_factor=20,
                    status_forcelist=[
                        400, 401, 402, 403, 500, 502, 503,
                        504
                    ])
    # Mount on 'https://' so the retrying adapter actually matches the
    # https upload URL (mounting only 'http://' would never apply here).
    s.mount('https://', HTTPAdapter(max_retries=retries))
    r = s.post(
        upload_url,
        data=payload,
        params={
            'access_token':
            'Yv6WrXJAxZXW3nMcEJNyU3aNNtax6gm6'
        },
        files=files,
    )
    print("UPLOAD STATUS CODE: %s" % r.status_code)
    if r.status_code == 200:
        print("SUCCESSFULLY POSTED FILE: %s" % mp3file)
        pprint("RESPONSE TEXT: %s" % r.json())
        if r.json()['result']['success']:
            pprint("FILE DONE! %s" % mp3file)
Example 3
import time
import json
import re
import gc

import requests
from requests.adapters import HTTPAdapter
from apscheduler.schedulers.blocking import BlockingScheduler
from datetime import datetime
"""
Autohome -- price-cut data for cars in the capitals of the three
northeastern provinces
"""

# Suppress SSL certificate warnings
requests.packages.urllib3.disable_warnings()

# Work around "exceeded max connections" by reusing one session with retries
srequest = requests.session()
srequest.mount('https://', HTTPAdapter(max_retries=60))

# Reconnection settings
srequest.keep_alive = False
srequest.adapters.DEFAULT_RETRIES = 10


class AutohomeSpider(object):
    """汽车之家降价车型爬取类"""
    def __init__(self):
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
            'Connection':
            'close',
            'cookie':
Example 4
# coding: utf-8

from datetime import datetime
import time
import random
import json
import requests
from requests.adapters import HTTPAdapter

s = requests.Session()
requests.adapters.DEFAULT_RETRIES = 5
s.mount('http://', HTTPAdapter(max_retries=5))
s.mount('https://', HTTPAdapter(max_retries=5))


def search(open_id, search_key, keyword):

    print "process keywords: %s" % keyword.encode("UTF8")

    s_url = "https://search.weixin.qq.com/cgi-bin/searchweb/wxindex/querywxindexgroup?wxindex_query_list=%s&gid=&openid=%s&search_key=%s" % (
        keyword, open_id, search_key)
    headers = {
        "Referer":
        "https://servicewechat.com/wxc026e7662ec26a3a/7/page-frame.html",
        "User-Agent":
        "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5 Build/M4B30Z; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/44.0.2403.117 Mobile Safari/537.36 MicroMessenger/6.7.3.1360(0x26070333) NetType/WIFI Language/zh_CN Process/appbrand0",
    }
    ss = s.get(s_url, headers=headers, timeout=3)
    content = json.loads(ss.content)

    wxindex_str = content["data"]["group_wxindex"][0]["wxindex_str"]
Example 5
import requests
from requests.auth import HTTPBasicAuth
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError
from getpass import getpass, getuser
# random note - f strings are from python 3.6+

## At this point it does the following:
# it prints the headers and the full text of the response, the html etc...

#r = requests.get('http://www.instagram.com/', auth=HTTPBasicAuth('getmegyoza', getpass()))

session_adapter = HTTPAdapter(max_retries=1)

savoy_url = 'http://www.savoyparkr.com/'
insta_url = 'http://www.instagram.com/accounts/login'

if dir(getpass) == dir(getuser):
    print('they are the same')
    print(getuser())

else:
    print('they are not the same, please check into it')

with requests.Session() as session:
    #    session.auth = ('getmegyoza', getpass(prompt="getm*** password: "))
    #    session.mount(savoy_url, session_adapter)   # here to try and mitigate 429 response

    try:
        s = session.get(insta_url)
        print('everything is ok, no output here for now')
    except ConnectionError as ex:
        # The original snippet is cut off here; this handler for the
        # imported ConnectionError is a minimal completion so the try
        # block is syntactically valid.
        print('connection failed: %s' % ex)
Example 6
from PIL import Image   # image format conversion
import zipfile  # for unpacking archives
import os
import time
import re
import json
import imageio          # for assembling GIFs
import math     # for math.ceil
from http.cookiejar import CookieJar

import requests
from requests.adapters import HTTPAdapter
from requests.cookies import RequestsCookieJar
from urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)      # force-suppress certificate warnings
se = requests.session()

se.cookies = CookieJar()

se.mount('http://', HTTPAdapter(max_retries=1))
se.mount('https://', HTTPAdapter(max_retries=1))

class Pixiv():
    # bookmarked works
    def __init__(self):
        self.headers = {
            'referer': 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index',
            'origin': 'https://accounts.pixiv.net',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
        }
        self.bookmark = 'https://www.pixiv.net/bookmark.php?rest=show&p='  # bookmarks URL
        self.path = u'H:\\bookmark'
        self.jar = RequestsCookieJar()
Example 7
if TYPE_CHECKING:
    from pybatfish.client.session import Session  # noqa: F401

# suppress the urllib3 warnings due to old version of urllib3 (inside requests)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

# Setup a session, configure retry policy
_requests_session = requests.Session()
# Prefix "http" will cover both "http" & "https"
_requests_session.mount(
    "http",
    HTTPAdapter(
        max_retries=Retry(
            connect=Options.max_tries_to_connect_to_coordinator,
            read=Options.max_tries_to_connect_to_coordinator,
            backoff_factor=Options.request_backoff_factor,
        )
    ),
)
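# Added note (not part of pybatfish): requests matches mounted adapters by
# longest URL prefix, which is why the bare "http" prefix above also covers
# "https" URLs. A quick sketch:
#
#     demo = requests.Session()
#     demo.mount("http", HTTPAdapter(max_retries=3))
#     demo.get_adapter("https://example.com")  # -> the adapter mounted above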


# uncomment the two lines below if you want http capture by fiddler
# _requests_session.proxies = {'http': 'http://127.0.0.1:8888',
#                              'https': 'http://127.0.0.1:8888'}


def get_json_response(session, resource, jsonData=None, useHttpGet=False):
    # type: (Session, str, Optional[Dict], bool) -> Dict[str, Any]
    """Send a request (POST or GET) to Batfish.
Example 8
def fetch_subscription_info(email: str) -> typing.Optional[dict]:
    """Returns the user info dict from the external subscriptions management server.

    :returns: the store user info, or None if the user can't be found or there
        was an error communicating. A dict like this is returned:
        {
            "shop_id": 700,
            "cloud_access": 1,
            "paid_balance": 314.75,
            "balance_currency": "EUR",
            "start_date": "2014-08-25 17:05:46",
            "expiration_date": "2016-08-24 13:38:45",
            "subscription_status": "wc-active",
            "expiration_date_approximate": true
        }
    """

    from requests.adapters import HTTPAdapter
    import requests.exceptions

    external_subscriptions_server = current_app.config['EXTERNAL_SUBSCRIPTIONS_MANAGEMENT_SERVER']

    if log.isEnabledFor(logging.DEBUG):
        import urllib.parse

        log_email = urllib.parse.quote(email)
        log.debug('Connecting to store at %s?blenderid=%s',
                  external_subscriptions_server, log_email)

    # Retry a few times when contacting the store.
    s = requests.Session()
    s.mount(external_subscriptions_server, HTTPAdapter(max_retries=5))

    try:
        r = s.get(external_subscriptions_server,
                  params={'blenderid': email},
                  verify=current_app.config['TLS_CERT_FILE'],
                  timeout=current_app.config.get('EXTERNAL_SUBSCRIPTIONS_TIMEOUT_SECS', 10))
    except requests.exceptions.ConnectionError as ex:
        log.error('Error connecting to %s: %s', external_subscriptions_server, ex)
        return None
    except requests.exceptions.Timeout as ex:
        log.error('Timeout communicating with %s: %s', external_subscriptions_server, ex)
        return None
    except requests.exceptions.RequestException as ex:
        log.error('Some error communicating with %s: %s', external_subscriptions_server, ex)
        return None

    if r.status_code != 200:
        log.warning("Error communicating with %s, code=%i, unable to check "
                    "subscription status of user %s",
                    external_subscriptions_server, r.status_code, email)
        return None

    store_user = r.json()

    if log.isEnabledFor(logging.DEBUG):
        import json
        log.debug('Received JSON from store API: %s',
                  json.dumps(store_user, sort_keys=False, indent=4))

    return store_user
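Note: the function above mounts the retrying adapter on the store's own URL prefix, so the retries apply only to requests against that host. A minimal standalone sketch of the same idea (the host and parameters are placeholders):

import requests
from requests.adapters import HTTPAdapter

s = requests.Session()
s.mount('https://store.example.com', HTTPAdapter(max_retries=5))
r = s.get('https://store.example.com/api/user',
          params={'blenderid': 'user@example.com'},
          timeout=10)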
Example 9
import os.path
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool, cpu_count
from requests.adapters import HTTPAdapter

import savepath

session = requests.Session()
session.mount('http://', HTTPAdapter(max_retries=3))
session.mount('https://', HTTPAdapter(max_retries=3))

base_url = 'https://www.mzitu.com'
all_url = base_url + '/all'
old_url = base_url + '/old'
dir_name = '妹子图'
save_path = os.path.join(savepath.save_path, dir_name)

if not os.path.exists(save_path):
    os.makedirs(save_path)

cookies = None


def parse_url(url, headers=None, timeout=5, cookies=None):
    response = session.get(url,
                           headers=headers,
                           timeout=timeout,
                           cookies=cookies)
    if response.status_code == 200:
        cookies = response.cookies
Example 10
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# `settings` is assumed to come from the surrounding project's configuration.


def request_session(
        url: str,
        method: str,
        payload: dict = None,
        headers: dict = None,
        username: str = settings.ONA_USERNAME,
        password: str = settings.ONA_PASSWORD,
        retries=3,
        backoff_factor=1.1,
        status_forcelist=(500, 502, 504),
):  # pylint: disable=too-many-arguments
    """
    Custom Method that takes in a URL, Method(GET / POST) and optionally
    retries, backoff_factor and status_forcelist. It creates a Request
    Session and Retry Object and mounts a HTTP Adapter to the
    Session and Sends a request to the url. It then returns the Response.

    The backoff policy is documented here:
    https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#module-urllib3.util.retry
    """  # noqa
    session = requests.Session()
    retries = Retry(total=retries,
                    read=retries,
                    connect=retries,
                    backoff_factor=backoff_factor,
                    status_forcelist=status_forcelist)

    if username is not None:
        basic_auth = (username, password)
    else:
        basic_auth = None

    adapter = HTTPAdapter(max_retries=retries)
    session.mount('https://', adapter)
    session.mount('http://', adapter)

    if method == 'GET':
        response = session.get(url,
                               auth=basic_auth,
                               params=payload,
                               headers=headers)
        return response
    if method == 'POST':
        response = session.post(url,
                                auth=basic_auth,
                                json=payload,
                                headers=headers)
        return response
    if method == 'PATCH':
        response = session.patch(url,
                                 auth=basic_auth,
                                 json=payload,
                                 headers=headers)
        return response
    if method == 'PUT':
        response = session.put(url,
                               auth=basic_auth,
                               json=payload,
                               headers=headers)
        return response
    if method == 'DELETE':
        response = session.delete(url, auth=basic_auth, headers=headers)
        return response

    return None
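A short usage sketch for the helper above (the URL and payload are placeholders):

response = request_session('https://example.com/api/records', 'GET',
                           payload={'page': 1})
if response is not None and response.ok:
    print(response.json())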
Example 11
import datetime
import json
import os
import random
import stat
import warnings
from hashlib import sha1

import requests
import six
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# ThreadLocalWrapper, AuthError, OauthError, DEFAULT_TOKEN_INFO_PATH,
# base64url_decode and makedirs_if_not_exists are assumed to come from the
# surrounding descarteslabs package.


class Auth:

    RETRY_CONFIG = Retry(
        total=5,
        backoff_factor=random.uniform(1, 10),
        method_whitelist=frozenset(["GET", "POST"]),
        status_forcelist=[429, 500, 502, 503, 504],
    )

    ADAPTER = ThreadLocalWrapper(
        lambda: HTTPAdapter(max_retries=Auth.RETRY_CONFIG))

    def __init__(
        self,
        domain="https://accounts.descarteslabs.com",
        scope=None,
        leeway=500,
        token_info_path=DEFAULT_TOKEN_INFO_PATH,
        client_id=None,
        client_secret=None,
        jwt_token=None,
        refresh_token=None,
    ):
        """
        Helps retrieve JWT from a client id and refresh token for cli usage.

        :param domain: endpoint for auth0
        :param scope: the JWT fields to be included
        :param leeway: JWT expiration leeway
        :param token_info_path: path to a JSON file optionally holding auth information
        :param client_id: JWT client id
        :param client_secret: JWT client secret
        :param jwt_token: the JWT token, if we already have one
        :param refresh_token: the refresh token
        """
        self.token_info_path = token_info_path

        token_info = {}
        if self.token_info_path:
            try:
                with open(self.token_info_path) as fp:
                    token_info = json.load(fp)
            except (IOError, ValueError):
                pass

        self.client_id = next(
            (x for x in (
                client_id,
                os.environ.get("DESCARTESLABS_CLIENT_ID"),
                os.environ.get("CLIENT_ID"),
                token_info.get("client_id"),
            ) if x is not None),
            None,
        )

        self.client_secret = next(
            (x for x in (
                client_secret,
                os.environ.get("DESCARTESLABS_CLIENT_SECRET"),
                os.environ.get("CLIENT_SECRET"),
                token_info.get("client_secret"),
            ) if x is not None),
            None,
        )

        self.refresh_token = next(
            (x for x in (
                refresh_token,
                os.environ.get("DESCARTESLABS_REFRESH_TOKEN"),
                token_info.get("refresh_token"),
            ) if x is not None),
            None,
        )

        if self.client_secret != self.refresh_token:
            if self.client_secret is not None and self.refresh_token is not None:
                warnings.warn(
                    "Authentication token mismatch: "
                    "client_secret and refresh_token values must match for authentication to work correctly. "
                )

            if self.refresh_token is not None:
                self.client_secret = self.refresh_token
            elif self.client_secret is not None:
                self.refresh_token = self.client_secret

        self._token = next(
            (x for x in (
                jwt_token,
                os.environ.get("DESCARTESLABS_TOKEN"),
                token_info.get("JWT_TOKEN"),
                token_info.get("jwt_token"),
            ) if x is not None),
            None,
        )

        self.scope = next(
            (x for x in (scope, token_info.get("scope")) if x is not None),
            None)

        if token_info:
            # If the token was read from a path but environment variables were set, we may need
            # to reset the token.
            client_id_changed = token_info.get("client_id",
                                               None) != self.client_id
            client_secret_changed = (token_info.get("client_secret", None) !=
                                     self.client_secret)
            refresh_token_changed = (token_info.get("refresh_token", None) !=
                                     self.refresh_token)

            if client_id_changed or client_secret_changed or refresh_token_changed:
                self._token = None

        self._namespace = None
        self._session = ThreadLocalWrapper(self.build_session)
        self.domain = domain
        self.leeway = leeway

    @classmethod
    def from_environment_or_token_json(cls, **kwargs):
        """
        Creates an Auth object from environment variables CLIENT_ID,
        CLIENT_SECRET, JWT_TOKEN if they are set, or else from a JSON
        file at the given path.

        :param domain: endpoint for auth0
        :param scope: the JWT fields to be included
        :param leeway: JWT expiration leeway
        :param token_info_path: path to a JSON file optionally holding auth information
        """
        return Auth(**kwargs)

    @property
    def token(self):
        if self._token is None:
            self._get_token()
        else:  # might have token but could be close to expiration
            exp = self.payload.get("exp")

            if exp is not None:
                now = (datetime.datetime.utcnow() -
                       datetime.datetime(1970, 1, 1)).total_seconds()
                if now + self.leeway > exp:
                    try:
                        self._get_token()
                    except AuthError as e:
                        # Unable to refresh, raise if now > exp
                        if now > exp:
                            raise e

        return self._token

    @property
    def payload(self):
        if self._token is None:
            self._get_token()

        if isinstance(self._token, six.text_type):
            token = self._token.encode("utf-8")
        else:
            token = self._token

        claims = token.split(b".")[1]
        return json.loads(base64url_decode(claims).decode("utf-8"))

    @property
    def session(self):
        return self._session.get()

    def build_session(self):
        session = requests.Session()
        session.mount("https://", self.ADAPTER.get())
        return session

    def _get_token(self, timeout=100):
        if self.client_id is None:
            raise AuthError("Could not find client_id")

        if self.client_secret is None and self.refresh_token is None:
            raise AuthError("Could not find client_secret or refresh token")

        if self.client_id in ["ZOBAi4UROl5gKZIpxxlwOEfx8KpqXf2c"]:
            # TODO(justin) remove legacy handling
            # TODO (justin) insert deprecation warning
            if self.scope is None:
                scope = ["openid", "name", "groups", "org", "email"]
            else:
                scope = self.scope
            params = {
                "scope": " ".join(scope),
                "client_id": self.client_id,
                "grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
                "target": self.client_id,
                "api_type": "app",
                "refresh_token": self.refresh_token,
            }
        else:
            params = {
                "client_id": self.client_id,
                "grant_type": "refresh_token",
                "refresh_token": self.refresh_token,
            }

            if self.scope is not None:
                params["scope"] = " ".join(self.scope)

        r = self.session.post(self.domain + "/token",
                              json=params,
                              timeout=timeout)

        if r.status_code != 200:
            raise OauthError("%s: %s" % (r.status_code, r.text))

        data = r.json()
        access_token = data.get("access_token")
        id_token = data.get(
            "id_token")  # TODO(justin) remove legacy id_token usage

        if access_token is not None:
            self._token = access_token
        elif id_token is not None:
            self._token = id_token
        else:
            raise OauthError("could not retrieve token")
        token_info = {}

        if self.token_info_path:
            try:
                with open(self.token_info_path) as fp:
                    token_info = json.load(fp)
            except (IOError, ValueError):
                pass

        token_info["jwt_token"] = self._token

        if self.token_info_path:
            token_info_directory = os.path.dirname(self.token_info_path)
            makedirs_if_not_exists(token_info_directory)

            try:
                with open(self.token_info_path, "w+") as fp:
                    json.dump(token_info, fp)

                os.chmod(self.token_info_path, stat.S_IRUSR | stat.S_IWUSR)
            except IOError as e:
                warnings.warn("failed to save token: {}".format(e))

    @property
    def namespace(self):
        if self._namespace is None:
            self._namespace = sha1(
                self.payload["sub"].encode("utf-8")).hexdigest()
        return self._namespace
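A hedged usage sketch for the class above (the client id and secret are placeholders; attribute and property names follow the code shown):

auth = Auth(client_id='my-client-id', client_secret='my-client-secret')
session = auth.session           # thread-local requests.Session with the retry adapter
print(auth.payload.get('sub'))   # decoded JWT claims, fetching a token on demand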
Example 12
    def __init__(self,
                 host,
                 port,
                 token,
                 index,
                 hostname=None,
                 source=None,
                 sourcetype='text',
                 verify=True,
                 timeout=60,
                 flush_interval=15.0,
                 queue_size=5000,
                 debug=False,
                 retry_count=5,
                 retry_backoff=2.0,
                 multiple_process=False,
                 protocol='https',
                 proxies=None,
                 record_format=False):

        global instances
        instances.append(self)
        logging.Handler.__init__(self)

        self.host = host
        self.port = port
        self.token = token
        self.index = index
        self.source = source
        self.sourcetype = sourcetype
        self.verify = verify
        self.timeout = timeout
        self.flush_interval = flush_interval
        self.log_payload = ""
        self.SIGTERM = False  # 'True' if application requested exit
        self.timer = None
        self.testing = False  # Used for slightly altering logic during unit testing
        self.multiple_process = multiple_process
        # It is possible to get 'behind' and never catch up, so we limit the queue size
        if self.multiple_process:
            self.queue = JoinableQueue(maxsize=queue_size)
        else:
            self.queue = Queue(maxsize=queue_size)
        self.debug = debug
        self.session = requests.Session()
        self.retry_count = retry_count
        self.retry_backoff = retry_backoff
        self.protocol = protocol
        self.proxies = proxies
        self.record_format = record_format

        self.write_debug_log("Starting debug mode")

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        self.write_debug_log("Preparing to override loggers")

        # prevent infinite recursion by silencing requests and urllib3 loggers
        logging.getLogger('requests').propagate = False
        logging.getLogger('urllib3').propagate = False

        # and do the same for ourselves
        logging.getLogger(__name__).propagate = False

        # disable all warnings from urllib3 package
        if not self.verify:
            requests.packages.urllib3.disable_warnings()

        if self.verify and self.protocol == 'http':
            print("[SplunkHandler DEBUG] " +
                  'cannot use SSL verify with an insecure (http) connection')

        if self.proxies is not None:
            self.session.proxies = self.proxies

        # Set up automatic retry with back-off
        self.write_debug_log("Preparing to create a Requests session")
        retry = Retry(
            total=self.retry_count,
            backoff_factor=self.retry_backoff,
            method_whitelist=False,  # Retry for any HTTP verb
            status_forcelist=[500, 502, 503, 504])
        self.session.mount(self.protocol + '://',
                           HTTPAdapter(max_retries=retry))

        self.start_worker_thread()

        self.write_debug_log("Class initialize complete")
Example 13
import gc
import json
import re
from random import randint

import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# MyError and logger are assumed to be defined elsewhere in the project.


class ScrapperUsingRequest:
    retry_strategy = Retry(
        total=5,
        status_forcelist=[429, 500, 502, 503, 504],
        method_whitelist=[
            "HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE", "POST"
        ],
        backoff_factor=1,
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    http = requests.Session()

    def __init__(self, protocol, proxy):
        self.http.mount("https://", self.adapter)
        self.http.mount("http://", self.adapter)
        self.proxy = proxy
        self.main_url = f'http{protocol}://sada.guilan.ac.ir'

    def login(self, username, password):
        ## Old design of site
        # data = {
        #     'Command': 'LOGIN',
        #     'username': username,
        #     'password': password,
        #     #'SSMUsername_txt': user_data['username'],
        #     #'SSMPassword_txt': user_data['password'],
        # }
        # login_request = self.http.post('http'+self.protocol+'://sada.guilan.ac.ir/SubSystem/Edari/PRelate/Site/SignIn.aspx', data=data, timeout=7, proxies=self.proxy)
        # dashboard_param_search = re.search(r'\(\"http'+self.protocol+r'\:\/\/sada\.guilan\.ac\.ir\/Dashboard\.aspx\?param\=(?P<param>.*?)\"\)', login_request.text)
        # if dashboard_param_search is None:
        #     if login_request.text.find('رمز عبور شما اشتباه ميباشد') >= 0 or login_request.text.find('نام کاربري يا کلمه عبور شما اشتباه ميباشد') >= 0:
        #         raise MyError('incorrect password_or_username', 'iup')  # incorrect username password
        #     else:
        #         logger.info(login_request.text)
        #         raise Exception('dashbord_param or incorrect_password_or_username_message not found', 'dpnf')  # dashbord param not found
        # dashboard_param = dashboard_param_search.group('param')
        # return dashboard_param

        ## New design
        hs = self.http.headers
        hs['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0'
        hs['Accept'] = 'application/json, text/plain, */*'
        if 'Authentication' in hs:
            del hs['Authentication']
        if 'Authorization' in hs:
            del hs['Authorization']
        cipher = lambda p: '3' + '3'.join(list(p))
        self.http.get(f'{self.main_url}/Hermes',
                      headers=hs,
                      timeout=7,
                      proxies=self.proxy)
        self.http.get(f'{self.main_url}/Hermes.html',
                      timeout=7,
                      proxies=self.proxy)

        d = {
            'inMethode_Name': '/api/Dashboard_Token_Initial',
            'language': '',
            'param': 'null',
            'params': ''
        }
        ti_page = self.http.post(
            f'{self.main_url}/api/Dashboard_Token_Initial',
            data=d,
            timeout=7,
            proxies=self.proxy)

        hs = self.http.headers
        auth_token = json.loads(ti_page.text)['outInfoJson']
        hs['Authentication'] = 'Bearer ' + auth_token
        d2 = {
            'inMethode_Name': '/api/Dashboard_CheckIs',
            'url': '/Hermes',
            'param': 'null',
            'params': ''
        }
        ci_page = self.http.post(f'{self.main_url}/api/Dashboard_CheckIs',
                                 data=d2,
                                 headers=hs,
                                 timeout=7,
                                 proxies=self.proxy)

        hs = self.http.headers
        access_token = json.loads(ci_page.text)['accessToken']
        hs['Authorization'] = 'Bearer ' + access_token
        d3 = {'inMethode_Name': '/api/Dashboard_Sign_CheckIs', 'hash': None}
        sci_page = self.http.post(
            f'{self.main_url}/api/Dashboard_Sign_CheckIs',
            data=json.dumps(d3),
            headers=hs,
            timeout=7,
            proxies=self.proxy)

        hs = self.http.headers
        access_token = json.loads(sci_page.text)['accessToken']
        hs['Authorization'] = 'Bearer ' + access_token
        d4 = {
            'inMethode_Name': '/api/Dashboard_Sign_In',
            'hash': None,
            'device': 'Desktop',
            'Rememberme': False,
            'cipher': cipher(password),
            'userName': username
        }
        si_page = self.http.post(f'{self.main_url}/api/Dashboard_Sign_In',
                                 data=json.dumps(d4),
                                 headers=hs,
                                 timeout=7,
                                 proxies=self.proxy)
        si_j = json.loads(si_page.text)

        if si_j['outNumber'] == -1:
            raise MyError(err_message=si_j['outMessage'],
                          err_code='fs')  # from site
        hs = self.http.headers
        del hs['Authorization']
        auth_token = si_j['outInfoJson']
        hs['Authentication'] = 'Bearer ' + auth_token
        d = {
            'inMethode_Name': '/api/Dashboard_Token_Initial',
            'language': '',
            'param': 'null',
            'params': ''
        }
        ti_page = self.http.post(
            f'{self.main_url}/api/Dashboard_Token_Initial',
            data=d,
            headers=hs,
            timeout=7,
            proxies=self.proxy)

        hs = self.http.headers
        auth_token = json.loads(ti_page.text)['outInfoJson']
        hs['Authentication'] = 'Bearer ' + auth_token
        d2 = {
            'inMethode_Name': '/api/Dashboard_CheckIs',
            'url': '/Hermes',
            'param': 'null',
            'params': ''
        }
        ci_page = self.http.post(f'{self.main_url}/api/Dashboard_CheckIs',
                                 data=d2,
                                 headers=hs,
                                 timeout=7,
                                 proxies=self.proxy)

        ci_j = json.loads(ci_page.text)
        return ci_j

    def get_report(self, access_token):
        # OLD design
        # report_request = self.http.post(f'{self.main_url}/Dashboard.aspx', params={'param': dashboard_param}, data={'Command': 'GET_TAB_INFO:020203'}, timeout=7, proxies=self.proxy)
        # report_param_search = re.search(r'\/Subsystem\/Amozesh\/Sabtenam\/Tasbir\/Report\/Report\.aspx\?param\=(?P<param>.*)', report_request.text)
        # if report_param_search is None:
        #     if report_request.text.find('بدهکار') >= 0:
        #         raise MyError('report problem because of debt', 'd')  # debt
        #     elif 'eval' in report_request.text.lower():
        #         raise MyError('report problem because of evallist', 'eval')  # evalList
        #     else:
        #         logger.info(report_request.text)
        #         raise Exception('report_param or debt_message not found', 'rpnf')  # report param not found
        # report_param = report_param_search.group('param')
        # report_page = self.http.get(f'{self.main_url}/Subsystem/Amozesh/Sabtenam/Tasbir/Report/Report.aspx', params={'param': report_param}, timeout=7, proxies=self.proxy)
        # return report_page
        hs = self.http.headers
        hs['Authorization'] = 'Bearer ' + access_token
        profile_page = self.http.post(
            f'{self.main_url}/api/Dashbord_Profile_Std_CheckIs',
            data={'inMethode_Name': '/api/Dashbord_Profile_Std_CheckIs'},
            headers=hs,
            timeout=7,
            proxies=self.proxy)
        menu_access_token = json.loads(profile_page.text)['accessToken']
        menu_hs = self.http.headers
        menu_hs['Authorization'] = 'Bearer ' + menu_access_token
        menu_page = self.http.post(
            f'{self.main_url}/api/Core_Menu_User',
            data={'inMethode_Name': '/api/Core_Menu_User'},
            headers=menu_hs,
            timeout=7,
            proxies=self.proxy)
        id_menu_2 = ''
        j = json.loads(menu_page.text)
        for rc in j:
            if rc['hafmanCode'] == '0202':
                for c in rc['childs']:
                    if c['hafmanCode'] == '020203':
                        id_menu_2 = c['idMenu2']
        if not id_menu_2:
            raise MyError(
                'فرم تثبیت تو سایت نیستش. از دکمه (گرفتن برنامه ترمهای قبل) استفاده کن و ترم آخر رو انتخاب کن',
                'rpne')  # report param not exist
        hs = self.http.headers
        hs['Authorization'] = 'Bearer ' + access_token
        d = {'idMenu2': id_menu_2, 'inMethode_Name': '/api/Core_Menu_Run'}
        tran_page = self.http.post(
            f'{self.main_url}/SubSystem/Angular_Tran.aspx',
            data=json.dumps(d),
            headers=hs,
            timeout=7,
            proxies=self.proxy)
        report_param = json.loads(tran_page.text)['outInfoJson']
        if 'eval' in report_param.lower():
            raise MyError('مثل اینکه باید فرم ارزیابی اساتید رو پر کنی.',
                          'eval')
        report_param = report_param.replace(
            '/Subsystem/Amozesh/Sabtenam/Tasbir/Report/Report.aspx?param=', '')
        report_page = self.http.get(
            f'{self.main_url}/Subsystem/Amozesh/Sabtenam/Tasbir/Report/Report.aspx',
            params={'param': report_param},
            timeout=7,
            proxies=self.proxy)
        return report_page

    def get_workbook(self, access_token):  # dashboard_param):
        ## Old design
        # workbook_request = self.http.post('http'+self.protocol+'://sada.guilan.ac.ir/Dashboard.aspx', params={'param': dashboard_param}, data={'Command': 'GET_TAB_INFO:020205'}, timeout=7, proxies=self.proxy)
        # workbook_param_search = re.search(r'\/Subsystem\/Amozesh\/Stu\/WorkBook\/StdWorkBook_Index\.aspx\?param\=(?P<param>.*)', workbook_request.text)
        # if workbook_param_search is None:
        #     raise Exception('workbook_param not found', 'wpnf')  # workbook param not found
        # workbook_param = workbook_param_search.group('param')
        # return workbook_param

        hs = self.http.headers
        hs['Authorization'] = 'Bearer ' + access_token
        profile_page = self.http.post(
            f'{self.main_url}/api/Dashbord_Profile_Std_CheckIs',
            data={'inMethode_Name': '/api/Dashbord_Profile_Std_CheckIs'},
            headers=hs,
            timeout=7,
            proxies=self.proxy)
        menu_access_token = json.loads(profile_page.text)['accessToken']
        menu_hs = self.http.headers
        menu_hs['Authorization'] = 'Bearer ' + menu_access_token
        menu_page = self.http.post(
            f'{self.main_url}/api/Core_Menu_User',
            data={'inMethode_Name': '/api/Core_Menu_User'},
            headers=menu_hs,
            timeout=7,
            proxies=self.proxy)
        id_menu_2 = ''
        j = json.loads(menu_page.text)
        for rc in j:
            if rc['hafmanCode'] == '0202':
                for c in rc['childs']:
                    if c['hafmanCode'] == '020205':
                        id_menu_2 = c['idMenu2']
        hs = self.http.headers
        hs['Authorization'] = 'Bearer ' + access_token
        d = {'idMenu2': id_menu_2, 'inMethode_Name': '/api/Core_Menu_Run'}
        tran_page = self.http.post(
            f'{self.main_url}/SubSystem/Angular_Tran.aspx',
            data=json.dumps(d),
            headers=hs,
            timeout=7,
            proxies=self.proxy)
        workbook_param = json.loads(tran_page.text)['outInfoJson']
        if 'eval' in workbook_param.lower():
            raise MyError('مثل اینکه باید فرم ارزیابی اساتید رو پر کنی.',
                          'eval')
        return workbook_param.replace(
            '/Subsystem/Amozesh/Stu/WorkBook/StdWorkBook_Index.aspx?param=',
            '')

    def get_term(self, workbook_param, prev_term, number_of_term):
        request_for_term = self.http.get(
            f'{self.main_url}/Subsystem/Amozesh/Stu/WorkBook/StdWorkBook_Index.aspx',
            params={'param': workbook_param},
            timeout=15,
            proxies=self.proxy)
        all_terms = BeautifulSoup(request_for_term.text, 'lxml')
        all_terms = all_terms.find(id='Term_Drp')
        if not prev_term:
            term = all_terms.find_all('option')[-1]['value']
        else:
            if number_of_term != -1:
                term = all_terms.find_all('option')[number_of_term]['value']
            else:
                terms_keyboard = []
                for term_index, term_str in enumerate(
                        all_terms.find_all('option')[1:]):
                    terms_keyboard.append(
                        [str(term_index + 1) + ' : ' + term_str.text])
                return terms_keyboard
        data = {
            'SubIs_Chk': 'false',
            'Command': 'Log:Vahed',
            'Hitab': 'Vahed',
            'TypeCard_Drp': 'rpGrade_Karname_2',
            'mx_grid_info': '0;1;1;1;;;onGridLoad;1;;',
            'Term_Drp': term
        }
        term_page = self.http.post(
            f'{self.main_url}/Subsystem/Amozesh/Stu/WorkBook/StdWorkBook_Index.aspx',
            params={'param': workbook_param},
            data=data,
            timeout=10,
            proxies=self.proxy)
        return term_page

    def get_infos_from_term_page(self, term_page_text):
        info = []
        soup = BeautifulSoup(term_page_text, 'lxml')
        tables = soup.find_all('table')

        time_column_index = 5
        for column_index, column in enumerate(tables[0].find_all('th')):
            if column.text == 'برنامه زماني':
                time_column_index = column_index
        table = tables[-1]
        rows = table.find_all('tr')
        for _, row in enumerate(rows):
            parts_of_row = row.find_all('td')
            info.append(parts_of_row[time_column_index].text + '\t\t\t' +
                        parts_of_row[1].text + '\t\t(((' +
                        parts_of_row[time_column_index -
                                     1].text.replace('\n ', ''))
        return {'tabel': info, 'midterm': [], 'exams': []}

    def get_infos_from_report_page(self, report_page_text):
        info = []
        soup = BeautifulSoup(report_page_text, 'lxml')
        rows = soup.find_all('table', class_='grd')
        time_column_index = -1
        for column_index in range(len(rows[0].find_all('td'))):
            if rows[0].find_all('td')[column_index].find(
                    'span').text == 'زمان برگزاري':
                time_column_index = column_index
        if time_column_index == -1:
            raise MyError('table is empty', 'empty')
        rows = rows[1:]
        number_of_rows = 0
        for row_index in range(len(rows)):
            try:
                int(rows[row_index].find_all('td')[0].find('span').text)
                number_of_rows = row_index

            except ValueError:
                break
        gc.collect()
        rows = rows[0:number_of_rows + 1]

        for row_index in range(len(rows)):
            parts_of_row = rows[row_index].find_all('td')
            info.append(parts_of_row[time_column_index].find('span').text +
                        '\t\t\t' + parts_of_row[2].find('span').text +
                        '\t\t(((' + parts_of_row[time_column_index - 1].find(
                            'span').text.replace('\n ', '').replace('\n', ''))
        exams_time_column_index = -1
        for column_index in range(len(rows[0].find_all('td'))):
            if rows[0].find_all('td')[column_index].find(
                    'span').text == 'زمان امتحان':
                exams_time_column_index = column_index
        exams_info = []
        for row_index in range(len(rows)):
            parts_of_row = rows[row_index].find_all('td')
            exams_info.append(
                parts_of_row[2].find('span').text + '   :   ' +
                parts_of_row[exams_time_column_index].find('span').text)

        return {'tabel': info, 'midterm': [], 'exams': exams_info}

    def get_eval_list(self, e_l_url):
        evalList_param_search = re.search(
            r'\/SubSystem\/Amozesh\/Eval\/List\/EvalList\.aspx\?param\=(?P<param>.*)',
            e_l_url)
        evalList_param = evalList_param_search.group('param')

        evalList_page = self.http.get(
            f'{self.main_url}/SubSystem/Amozesh/Eval/List/EvalList.aspx',
            params={'param': evalList_param},
            timeout=7,
            proxies=self.proxy)
        soup = BeautifulSoup(evalList_page.text, 'lxml')
        eval_list = soup.find_all('table')[-1].find_all('tr')
        return eval_list, evalList_param

    def get_professor_list(self, eval_elem, evalList_param):
        hs = self.http.headers
        hs['Referer'] = f'{self.main_url}/SubSystem/Amozesh/Eval/List/EvalList.aspx?param={evalList_param}'
        Command_data = 'AnswerSubject♥' + eval_elem.find_all(
            'td')[0].text + '♥' + eval_elem.find_all('td')[3].text
        eval_request = self.http.post(
            f'{self.main_url}/SubSystem/Amozesh/Eval/List/EvalList.aspx',
            params={'param': evalList_param},
            data={'Command': Command_data},
            headers=hs,
            timeout=7,
            proxies=self.proxy)
        eval_param = eval_request.text
        eval_page = self.http.get(
            f'{self.main_url}/SubSystem/Amozesh/Eval/Answer/Subject/EvalAnswerSubject.aspx',
            params={'param': eval_param},
            timeout=7,
            proxies=self.proxy)

        inner_soup = BeautifulSoup(eval_page.text, 'lxml')
        professor_list = inner_soup.find_all('table')[-1].find_all('tr')
        return professor_list, eval_param

    def get_questions(self, professor_elem, eval_param):
        Command_data = 'Answer♥' + professor_elem.find_all('td')[0].text + '♥' + professor_elem.find_all('td')[1].text + '♥' + professor_elem.find_all('td')[2].text +\
                        '♥' + professor_elem.find_all('td')[3].text + '♥' + professor_elem.find_all('td')[4].text + '♥' + professor_elem.find_all('td')[7].text

        professor_request = self.http.post(
            f'{self.main_url}/SubSystem/Amozesh/Eval/Answer/Subject/EvalAnswerSubject.aspx',
            params={'param': eval_param},
            data={'Command': Command_data},
            timeout=7,
            proxies=self.proxy)
        questions_param = professor_request.text
        questions_page = self.http.get(
            f'{self.main_url}/SubSystem/Amozesh/Eval/Answer/ListItems.aspx',
            params={'param': questions_param},
            timeout=7,
            proxies=self.proxy)
        qs_soup = BeautifulSoup(questions_page.text, 'lxml')
        qs = qs_soup.find_all('table')[-1].find_all('tr')
        return qs, questions_param

    def answer_qs_professor(self, qs, score, questions_param):
        x = 'Insert:??*'
        post_data = {}
        one_random_q = randint(0, len(qs) - 1)
        for q_i, q in enumerate(qs):
            nomre = score
            if len(q.find_all('td')) > 3:
                x += q.find_all('td')[3].text
            x += '?'
            if q_i == one_random_q:
                if nomre == 8:
                    nomre -= 1
                elif nomre == 0:
                    nomre += 1
                else:
                    nomre = nomre + [1, -1][randint(0, 1)]
            for inp_el in q.find_all('input'):
                # nomre = 1 # means 19
                if inp_el['id'][:3] == 'rb' + str(nomre):
                    post_data[inp_el['id']] = 'true'
                    x += inp_el['value']
            x += '?'
            x += q.find_all('td')[10].text
            x += '*'
        x += ':'
        post_data['Command'] = x
        logger.info('EVALLLLLLLL  ' + str(post_data))
        professor_eval_request = self.http.post(
            f'{self.main_url}/SubSystem/Amozesh/Eval/Answer/ListItems.aspx',
            params={'param': questions_param},
            data=post_data,
            timeout=7,
            proxies=self.proxy)
        return 'ok' in professor_eval_request.text.lower()
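A hedged usage sketch for the scraper above (credentials and proxy are placeholders; login() returns the Dashboard_CheckIs JSON, whose accessToken feeds the other methods):

scraper = ScrapperUsingRequest('s', proxy=None)  # 's' selects https
ci = scraper.login('student-username', 'student-password')
report_page = scraper.get_report(ci['accessToken'])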
Example 14
def _get_session_with_retries(self):
    session = requests.Session()
    retries = Retry(total=3, backoff_factor=1)
    session.mount('http://', HTTPAdapter(max_retries=retries))
    session.mount('https://', HTTPAdapter(max_retries=retries))
    return session
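The same pattern as a standalone helper, a minimal self-contained sketch outside of any class:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


def get_session_with_retries(total=3, backoff_factor=1):
    # One retrying adapter mounted for both schemes.
    session = requests.Session()
    retries = Retry(total=total, backoff_factor=backoff_factor)
    adapter = HTTPAdapter(max_retries=retries)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session


session = get_session_with_retries()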
Example 15
from future.moves.urllib.parse import urlencode
import requests

import gitlab
import cachecontrol
from requests.adapters import HTTPAdapter
from rest_framework import status as http_status
from framework.exceptions import HTTPError

from addons.gitlab.exceptions import NotFoundError, AuthError
from addons.gitlab.settings import DEFAULT_HOSTS

# Initialize caches
https_cache = cachecontrol.CacheControlAdapter()
default_adapter = HTTPAdapter()


class GitLabClient(object):
    def __init__(self, external_account=None, access_token=None, host=None):
        self.access_token = getattr(external_account, 'oauth_key',
                                    None) or access_token
        self.host = getattr(external_account, 'oauth_secret',
                            None) or host or DEFAULT_HOSTS[0]

        if self.access_token:
            self.gitlab = gitlab.Gitlab(self.host,
                                        private_token=self.access_token)
        else:
            self.gitlab = gitlab.Gitlab(self.host)

    def user(self, user=None):
Example 16
class Service(object):
    """The default Descartes Labs HTTP Service used to communicate with its servers.

    This service has a default timeout and retry policy that retries HTTP requests
    depending on the timeout and HTTP status code that was returned.  This is based
    on the `requests timeouts
    <https://requests.readthedocs.io/en/master/user/advanced/#timeouts>`_
    and the `urllib3 retry object
    <https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry>`_.

    The default timeouts are set to 9.5 seconds for establishing a connection (slightly
    larger than a multiple of 3, which is the TCP default packet retransmission window),
    and 30 seconds for reading a response.

    The default retry logic retries up to 3 times total, a maximum of 2 for establishing
    a connection, 2 for reading a response, and 2 for unexpected HTTP status codes.
    The backoff_factor is a random number between 1 and 3, but will never be more
    than 2 minutes.  The unexpected HTTP status codes that will be retried are ``500``,
    ``502``, ``503``, and ``504`` for any of the HTTP requests.

    Parameters
    ----------
    url: str
        The URL prefix to use for communication with the Descartes Labs server.
    token: str, optional
        Deprecated.
    auth: Auth, optional
        A Descartes Labs :py:class:`~descarteslabs.client.auth.Auth` instance.  If not
        provided, a default one will be instantiated.
    retries: int or urllib3.util.retry.Retry
        If a number, it's the number of retries that will be attempted.  If a
        :py:class:`urllib3.util.retry.Retry` instance, it will determine the retry
        behavior.  If not provided, the default retry policy as described above will
        be used.
    session_class: class
        The session class to use when instantiating the session.  This must be a derived
        class from :py:class:`Session`.  If not provided, the default session class
        is used.  You can register a default session class with
        :py:meth:`Service.set_default_session_class`.

    Raises
    ------
    TypeError
        If you try to use a session class that is not derived from :py:class:`Session`.
    """

    # https://requests.readthedocs.io/en/master/user/advanced/#timeouts
    CONNECT_TIMEOUT = 9.5
    READ_TIMEOUT = 30

    TIMEOUT = (CONNECT_TIMEOUT, READ_TIMEOUT)

    RETRY_CONFIG = Retry(
        total=3,
        connect=2,
        read=2,
        status=2,
        backoff_factor=random.uniform(1, 3),
        method_whitelist=frozenset(
            [
                HttpRequestMethod.HEAD,
                HttpRequestMethod.TRACE,
                HttpRequestMethod.GET,
                HttpRequestMethod.POST,
                HttpRequestMethod.PUT,
                HttpRequestMethod.PATCH,
                HttpRequestMethod.OPTIONS,
                HttpRequestMethod.DELETE,
            ]
        ),
        status_forcelist=[
            HttpStatusCode.InternalServerError,
            HttpStatusCode.BadGateway,
            HttpStatusCode.ServiceUnavailable,
            HttpStatusCode.GatewayTimeout,
        ],
    )

    # We share an adapter (one per thread/process) among all clients to take advantage
    # of the single underlying connection pool.
    ADAPTER = ThreadLocalWrapper(lambda: HTTPAdapter(max_retries=Service.RETRY_CONFIG))

    _session_class = Session

    @classmethod
    def set_default_session_class(cls, session_class):
        """Set the default session class for :py:class:`Service`.

        The default session is used for any :py:class:`Service` that is instantiated
        without specifying the session class.

        Parameters
        ----------
        session_class: class
            The session class to use when instantiating the session.  This must be the
            class :py:class:`Session` itself or a derived class from
            :py:class:`Session`.
        """

        if not issubclass(session_class, Session):
            raise TypeError(
                "The session class must be a subclass of {}.".format(Session)
            )

        cls._session_class = session_class

    @classmethod
    def get_default_session_class(cls):
        """Get the default session class for :py:class:`Service`.

        Returns
        -------
        Session
            The default session class, which is :py:class:`Session` itself or a derived
            class from :py:class:`Session`.
        """

        return cls._session_class

    def __init__(self, url, token=None, auth=None, retries=None, session_class=None):
        if auth is None:
            auth = Auth()

        if token is not None:
            warn(
                "setting token at service level will be removed in future",
                DeprecationWarning,
            )
            auth._token = token

        self.auth = auth
        self.base_url = url

        if retries is None:
            self._adapter = Service.ADAPTER
        else:
            self._adapter = ThreadLocalWrapper(lambda: HTTPAdapter(max_retries=retries))

        if session_class is not None:
            # Overwrite the default session class
            if not issubclass(session_class, Session):
                raise TypeError(
                    "The session class must be a subclass of {}.".format(Session)
                )

            self._session_class = session_class

        # Sessions can't be shared across threads or processes because the underlying
        # SSL connection pool can't be shared. We create them thread-local to avoid
        # intractable exceptions when users naively share clients e.g. when using
        # multiprocessing.
        self._session = ThreadLocalWrapper(self._build_session)

    @property
    def token(self):
        """str: The bearer token used in the requests."""
        return self.auth.token

    @token.setter
    def token(self, token):
        """str: Deprecated"""
        self.auth._token = token

    @property
    def session(self):
        """Session: The session instance used by this service."""
        session = self._session.get()
        auth = add_bearer(self.token)
        if session.headers.get(HttpHeaderKeys.Authorization) != auth:
            session.headers[HttpHeaderKeys.Authorization] = auth

        return session

    def _build_session(self):
        session = self._session_class(self.base_url, timeout=self.TIMEOUT)
        session.initialize()

        adapter = self._adapter.get()
        session.mount(HttpMountProtocol.HTTPS, adapter)
        session.mount(HttpMountProtocol.HTTP, adapter)

        session.headers.update(
            {
                HttpHeaderKeys.ContentType: HttpHeaderValues.ApplicationJson,
                HttpHeaderKeys.UserAgent: "{}/{}".format(
                    HttpHeaderValues.DlPython, __version__
                ),
            }
        )

        try:
            session.headers.update(
                {
                    # https://github.com/easybuilders/easybuild/wiki/OS_flavor_name_version
                    HttpHeaderKeys.Platform: platform.platform(),
                    HttpHeaderKeys.Python: platform.python_version(),
                    # https://stackoverflow.com/questions/47608532/how-to-detect-from-within-python-whether-packages-are-managed-with-conda
                    HttpHeaderKeys.Conda: str(
                        os.path.exists(
                            os.path.join(sys.prefix, "conda-meta", "history")
                        )
                    ),
                    # https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
                    HttpHeaderKeys.Notebook: str("ipykernel" in sys.modules),
                    HttpHeaderKeys.ClientSession: uuid.uuid4().hex,
                }
            )
        except Exception:
            pass

        return session
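A hedged instantiation sketch (the URL is a placeholder; Auth is the class shown in Example 11, and the custom Session is assumed to resolve paths against the base URL):

service = Service('https://platform.example.com/api', auth=Auth())
resp = service.session.get('/health', timeout=Service.TIMEOUT)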
Example 17
def oss():
    for url in pyl:
        global aff, days, nam
        with requests.Session() as s:
            s.mount('http://', HTTPAdapter(max_retries=10))
            ur = s.post(
                'http://www.osn.com/CMSPages/TVScheduleWebService.asmx/GetTVChannelsProgramTimeTable',
                data=url,
                headers=headers)
            pg = ur.text.replace('<?xml version="1.0" encoding="utf-8"?>',
                                 '').replace(
                                     '<string xmlns="http://tempuri.org/">',
                                     '').replace('</string>', '')
            data = json.loads(pg)
            #sleep(0.05)
            for d in data:
                day = datetime.datetime.fromtimestamp(
                    int(d['StartDateTime'].replace('/Date(', '').replace(
                        ')/', '')) // 1000).strftime('%Y-%m-%d')
                if now == day or day > now:
                    payload = {
                        "prgmEPGUNIQID": d['EPGUNIQID'],
                        "countryCode": "SA"
                    }
                    pll.append(d['EPGUNIQID'])
                    ch = ''
                    with requests.Session() as session:
                        session.mount('http://', HTTPAdapter(max_retries=10))
                        uri = session.post(
                            'http://www.osn.com/CMSPages/TVScheduleWebService.asmx/GetProgramDetails',
                            data=payload,
                            headers=headers)
                        pag = uri.text.replace(
                            '<?xml version="1.0" encoding="utf-8"?>',
                            '').replace('<string xmlns="http://tempuri.org/">',
                                        '').replace('</string>', '')
                        data = json.loads(pag)
                        nm = data[0][u'ChannelNameEnglish'].replace(' ', '_')
                        nam = data[0][u'ChannelNameEnglish']
                        days = datetime.datetime.fromtimestamp(
                            int(data[0][u'StartDateTime'].replace(
                                "/Date(", '').replace(")/", '')) //
                            1000).strftime('%Y%m%d%H%M%S')
                        days_end = datetime.datetime.fromtimestamp(
                            int(data[0][u'EndDateTime'].replace(
                                "/Date(", '').replace(")/", '')) //
                            1000).strftime('%Y%m%d%H%M%S')
                        aff = datetime.datetime.fromtimestamp(
                            int(data[0][u'EndDateTime'].replace(
                                "/Date(", '').replace(")/", '')) //
                            1000).strftime('%Y-%m-%d')
                        ch += 2 * ' ' + '<programme start="' + days + ' ' + time_zone + '" stop="' + days_end + ' ' + time_zone + '" channel="' + nm + '">' + '\n'
                        if url['channelCode'] in ('SER', 'YAW', 'SAF',
                                                  'CM1', 'CM2', 'FAN',
                                                  'OYH', 'OYA', 'OYC'):
                            ch += '     <title lang="en">' + data[0][
                                u'Arab_Title'] + '</title>' + "\n"
                        else:
                            ch += '     <title lang="en">' + data[0][
                                u'Title'].replace('&',
                                                  'and') + '</title>' + "\n"
                        if data[0][u'Arab_Synopsis'] == u'\r\n':
                            ch += '     <desc lang="ar">' + data[0][
                                u'GenreArabicName'] + '</desc>\n  </programme>\r'
                        else:
                            ch += '     <desc lang="ar">' + data[0][
                                u'Arab_Synopsis'] + '</desc>\n  </programme>\r'
                        #ch+='     <sub-title lang="ar">'+data[0][u'GenreArabicName']+'</sub-title>'+"\n"+'  </programme>'+"\n"
                        with io.open("/etc/epgimport/osn.xml",
                                     "a",
                                     encoding='UTF-8') as f:
                            f.write(ch)
            for _ in progressbar((pll * 120), nam + " " + aff + " : ", 15):
                pass
            #sleep(0.005)
    with io.open("/etc/epgimport/osn.xml", "a", encoding="utf-8") as f:
        f.write(u'</tv>')  # io.open wants unicode text; str.decode() was Python 2 only
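
A hedged refactoring sketch for the repeated '/Date(ms)/' timestamp parsing in oss() above; the helper name epoch_ms_to_str is hypothetical and not part of the original script:

def epoch_ms_to_str(value, fmt):
    # value looks like '/Date(1592946000000)/': strip the wrapper,
    # convert milliseconds to seconds, then format
    ms = int(value.replace('/Date(', '').replace(')/', ''))
    return datetime.datetime.fromtimestamp(ms // 1000).strftime(fmt)

# e.g. days = epoch_ms_to_str(details[0][u'StartDateTime'], '%Y%m%d%H%M%S')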
Esempio n. 18
0
class ThirdPartyService(object):
    """The default Descartes Labs HTTP Service used for 3rd party servers.

    This service has a default timeout and retry policy that retries HTTP requests
    depending on the timeout and HTTP status code that was returned.  This is based
    on the `requests timeouts
    <https://requests.readthedocs.io/en/master/user/advanced/#timeouts>`_
    and the `urllib3 retry object
    <https://urllib3.readthedocs.io/en/latest/reference/urllib3.util.html#urllib3.util.retry.Retry>`_.

    The default timeouts are set to 9.5 seconds for establishing a connection (slightly
    larger than a multiple of 3, which is the TCP default packet retransmission window),
    and 30 seconds for reading a response.

    The default retry logic retries up to 10 times total, with a maximum of 2
    retries for reading a response.  The backoff_factor is a random number
    between 1 and 3, chosen once when the class is defined; urllib3 caps each
    resulting sleep at 2 minutes.  The HTTP status codes that trigger a retry
    are ``429``, ``500``, ``502``, ``503``, and ``504`` for any of the
    HTTP requests.

    Parameters
    ----------
    url: str
        The URL prefix to use for communication with the 3rd party server.
    session_class: class
        The session class to use when instantiating the session.  This must be a derived
        class from :py:class:`Session`.  If not provided, the default session class
        is used.  You can register a default session class with
        :py:meth:`ThirdPartyService.set_default_session_class`.

    Raises
    ------
    TypeError
        If you try to use a session class that is not derived from :py:class:`Session`.
    """

    CONNECT_TIMEOUT = 9.5
    READ_TIMEOUT = 30
    TIMEOUT = (CONNECT_TIMEOUT, READ_TIMEOUT)

    RETRY_CONFIG = Retry(
        total=10,
        read=2,
        backoff_factor=random.uniform(1, 3),
        method_whitelist=frozenset(
            [
                HttpRequestMethod.HEAD,
                HttpRequestMethod.TRACE,
                HttpRequestMethod.GET,
                HttpRequestMethod.POST,
                HttpRequestMethod.PUT,
                HttpRequestMethod.OPTIONS,
                HttpRequestMethod.DELETE,
            ]
        ),
        status_forcelist=[
            HttpStatusCode.TooManyRequests,
            HttpStatusCode.InternalServerError,
            HttpStatusCode.BadGateway,
            HttpStatusCode.ServiceUnavailable,
            HttpStatusCode.GatewayTimeout,
        ],
    )

    ADAPTER = ThreadLocalWrapper(
        lambda: HTTPAdapter(max_retries=ThirdPartyService.RETRY_CONFIG)
    )

    _session_class = Session

    @classmethod
    def set_default_session_class(cls, session_class=None):
        """Set the default session class for :py:class:`ThirdPartyService`.

        The default session is used for any :py:meth:`ThirdPartyService` that is
        instantiated without specifying the session class.

        Parameters
        ----------
        session_class: class
            The session class to use when instantiating the session.  This must be the
            class :py:class:`Session` itself or a derived class from
            :py:class:`Session`.
        """

        if not issubclass(session_class, Session):
            raise TypeError(
                "The session class must be a subclass of {}.".format(Session)
            )

        cls._session_class = session_class

    @classmethod
    def get_default_session_class(cls):
        """Get the default session class for the :py:class:`ThirdPartyService`.

        Returns
        -------
        Session
            The default session class, which is :py:class:`Session` itself or a derived
            class from :py:class:`Session`.
        """

        return cls._session_class

    def __init__(self, url="", session_class=None):
        self.base_url = url

        if session_class is not None:
            if not issubclass(session_class, Session):
                raise TypeError(
                    "The session class must be a subclass of {}.".format(Session)
                )

            self._session_class = session_class

        self._session = ThreadLocalWrapper(self._build_session)

    @property
    def session(self):
        return self._session.get()

    def _build_session(self):
        session = self._session_class(self.base_url, timeout=self.TIMEOUT)
        session.initialize()

        session.mount(HttpMountProtocol.HTTPS, self.ADAPTER.get())
        session.headers.update(
            {
                HttpHeaderKeys.ContentType: HttpHeaderValues.ApplicationOctetStream,
                HttpHeaderKeys.UserAgent: "{}/{}".format(
                    HttpHeaderValues.DlPython, __version__
                ),
            }
        )

        return session
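
A minimal usage sketch for the class above, assuming the module's Session class resolves relative paths against the base URL handed to it in _build_session; the URL and path are placeholders:

service = ThirdPartyService(url="https://example.com")
# the thread-local session already carries the timeout, retry policy,
# and default headers configured in _build_session
response = service.session.get("/status")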
Esempio n. 19
0
def main():
    # Command line parsing
    parser = argparse.ArgumentParser(
        prog="archiver",
        description="A script to back up web pages with the Internet Archive",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s {version}".format(version=__version__),
    )
    parser.add_argument(
        "urls",
        nargs="*",
        default=[],
        help="the URLs of the pages to archive",
    )
    parser.add_argument(
        "--file",
        help="path to a file containing urls to save (one url per line)",
        required=False,
    )
    parser.add_argument(
        "--sitemaps",
        nargs="+",
        default=[],
        help=
        "one or more URIs to sitemaps listing pages to archive; local paths must be prefixed with '{f}'"
        .format(f=LOCAL_PREFIX),
        required=False,
    )
    parser.add_argument(
        "--log",
        help="set the logging level, defaults to WARNING",
        dest="log_level",
        default=logging.WARNING,
        choices=[
            "DEBUG",
            "INFO",
            "WARNING",
            "ERROR",
            "CRITICAL",
        ],
    )
    parser.add_argument(
        "--log-to-file",
        help="redirect logs to a file",
        dest="log_file",
        default=None,
    )
    parser.add_argument(
        "--archive-sitemap-also",
        help="also submit the URL of the sitemap to be archived",
        dest="archive_sitemap",
        default=False,
        action="store_true",
    )
    parser.add_argument(
        "--jobs",
        "-j",
        help="run this many concurrent URL submissions, defaults to 1",
        default=1,
        type=int,
    )
    parser.add_argument(
        "--rate-limit-wait",
        help=
        "number of seconds to wait between page requests to avoid flooding the archive site, defaults to 5; also used as the backoff factor for retries",
        dest="rate_limit_in_sec",
        default=5,
        type=int,
    )

    args = parser.parse_args()

    # Set the logging level based on the arguments
    #
    # If `filename` is None, the constructor will set up a stream, otherwise it
    # will use the file specified.
    logging.basicConfig(level=args.log_level, filename=args.log_file)

    logging.debug("Archiver Version: %s", __version__)
    logging.debug("Arguments: %s", args)

    archive_urls = []
    # Add the regular pages
    if args.urls:
        logging.info("Adding page URLs to archive")
        logging.debug("Page URLs to archive: %s", args.urls)
        archive_urls += map(format_archive_url, args.urls)

    # Set up retry and backoff
    session = requests.Session()

    retries = Retry(
        total=5,
        backoff_factor=args.rate_limit_in_sec,
        status_forcelist=[500, 502, 503, 504, 520],
    )

    session.mount("https://", HTTPAdapter(max_retries=retries))
    session.mount("http://", HTTPAdapter(max_retries=retries))

    # Download and process the sitemaps
    remote_sitemaps = set()
    logging.info("Parsing sitemaps")
    for sitemap_url in args.sitemaps:

        # Save the remote ones, in case the user wants us to back them up
        if sitemap_is_local(sitemap_url):
            logging.debug("The sitemap '%s' is local.", sitemap_url)
            sitemap_xml = load_local_sitemap(sitemap_url)
        else:
            logging.debug("The sitemap '%s' is remote.", sitemap_url)
            if args.archive_sitemap:
                remote_sitemaps.add(sitemap_url)
            sitemap_xml = download_remote_sitemap(sitemap_url, session=session)

        for url in extract_pages_from_sitemap(sitemap_xml):
            archive_urls.append(format_archive_url(url))

    # Archive the sitemap as well, if requested
    if args.archive_sitemap:
        logging.info("Archiving sitemaps")
        if remote_sitemaps:
            archive_urls += map(format_archive_url, remote_sitemaps)
        else:
            logging.debug("No remote sitemaps to backup.")

    # And URLs from file
    if args.file:
        logging.info("Reading urls from file: %s", args.file)
        with open(args.file) as url_file:
            urls_from_file = [u.strip() for u in url_file if u.strip()]
        archive_urls += map(format_archive_url, urls_from_file)

    # Deduplicate URLs
    archive_urls = set(archive_urls)

    # Archive the URLs
    logging.debug("Archive URLs: %s", archive_urls)
    pool = mp.Pool(processes=args.jobs)
    partial_call = partial(call_archiver,
                           rate_limit_wait=args.rate_limit_in_sec,
                           session=session)
    pool.map(partial_call, archive_urls)
    pool.close()
    pool.join()
Esempio n. 20
0
def find_my_rank(request):
    current_user = request.user.userprofile

    summoner_name = current_user.game_tag
    my_region = current_user.region
    APIKey = "RGAPI-bf168e6e-8919-4967-873d-6109f4cad577"

    summoner_data_url = "https://" + my_region + \
                        ".api.riotgames.com/lol/summoner/v4/summoners/by-name/" \
                        + summoner_name + "?api_key=" + APIKey

    response = requests.get(summoner_data_url)
    print(response.status_code)
    summoner_data = response.json()
    print(summoner_data)

    if response.status_code != 200:
        print("No summoner exists with this name!")  # TODO: also surface this message in the template
        return

    ID = summoner_data['id']

    session = requests.Session()
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)

    ranked_data_url = "https://" + my_region + \
                      ".api.riotgames.com/lol/league/v4/entries/by-summoner/" \
                      + ID + "?api_key=" + APIKey

    # use the retrying session here; previously it was built but the ranked
    # request still went through a plain requests.get
    response2 = session.get(ranked_data_url)

    ranked_data = response2.json()

    """
    Se il tipo di ranked e' FLEX allora aggiorno i campi relativi a quel tipo di coda
    e per la coda in SOLO, altrimenti se trova solamente il tipo SOLO, assegno il campo
    relativo a FLEX vuoto e vado ad inserire i dati per SOLO.

    Aggiornamento: aggiungo anche le vittorie e sconfitte per ogni tipo di lega.
    Inoltre gestisco sia se qualcuno ha fatto solo ranked SOLO che chi e' unranked.
    """
    if len(ranked_data) == 2 and ranked_data[1]['queueType'] == 'RANKED_FLEX_SR':
        current_user.ranked_flex = (
            ranked_data[1]['tier'], ranked_data[1]['rank'], ranked_data[1]['leaguePoints'])
        current_user.wins_flex = ranked_data[1]['wins']
        current_user.losses_flex = ranked_data[1]['losses']
        if ranked_data[0]['queueType'] == 'RANKED_SOLO_5x5':
            current_user.ranked_solo = (
                ranked_data[0]['tier'], ranked_data[0]['rank'], ranked_data[0]['leaguePoints'])
            current_user.wins_solo = ranked_data[0]['wins']
            current_user.losses_solo = ranked_data[0]['losses']
        else:
            current_user.ranked_solo = "Not enough solo queue played"
    elif len(ranked_data) == 2 and ranked_data[0]['queueType'] == 'RANKED_FLEX_SR':
        current_user.ranked_flex = (
            ranked_data[0]['tier'], ranked_data[0]['rank'], ranked_data[0]['leaguePoints'])
        # take wins/losses from the same entry as the tier (index 0, not 1)
        current_user.wins_flex = ranked_data[0]['wins']
        current_user.losses_flex = ranked_data[0]['losses']
        if ranked_data[1]['queueType'] == 'RANKED_SOLO_5x5':
            current_user.ranked_solo = (
                ranked_data[1]['tier'], ranked_data[1]['rank'], ranked_data[1]['leaguePoints'])
            current_user.wins_solo = ranked_data[1]['wins']
            current_user.losses_solo = ranked_data[1]['losses']
        else:
            current_user.ranked_solo = "Not enough solo queue played"
    elif len(ranked_data) == 1 and ranked_data[0]['queueType'] == 'RANKED_SOLO_5x5':
        current_user.ranked_flex = "Not enough flex played"
        current_user.wins_flex = "0"
        current_user.losses_flex = "0"
        current_user.ranked_solo = (
            ranked_data[0]['tier'], ranked_data[0]['rank'], ranked_data[0]['leaguePoints'])
        current_user.wins_solo = ranked_data[0]['wins']
        current_user.losses_solo = ranked_data[0]['losses']
    elif len(ranked_data) == 0:
        current_user.ranked_flex = "Not enough flex played"
        current_user.ranked_solo = "Not enough solo queue played"
        current_user.wins_flex = "0"
        current_user.losses_flex = "0"
        current_user.wins_solo = "0"
        current_user.losses_solo = "0"

    current_user.save()
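
The index-order branching above can be expressed more compactly by keying on queueType; a hedged equivalent sketch using the same field names, with the unranked defaults set first and then overwritten by whatever entries the API returned:

current_user.ranked_flex = "Not enough flex played"
current_user.ranked_solo = "Not enough solo queue played"
current_user.wins_flex = current_user.losses_flex = "0"
current_user.wins_solo = current_user.losses_solo = "0"
for entry in ranked_data:
    rank_tuple = (entry['tier'], entry['rank'], entry['leaguePoints'])
    if entry['queueType'] == 'RANKED_FLEX_SR':
        current_user.ranked_flex = rank_tuple
        current_user.wins_flex = entry['wins']
        current_user.losses_flex = entry['losses']
    elif entry['queueType'] == 'RANKED_SOLO_5x5':
        current_user.ranked_solo = rank_tuple
        current_user.wins_solo = entry['wins']
        current_user.losses_solo = entry['losses']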
Esempio n. 21
0
 def __init__(self, max_retries=5):
     self.session = requests.Session()
     self.session.mount(self.PAPRIKA_BASE_URL,
                        HTTPAdapter(max_retries=max_retries))
Esempio n. 22
0
"""Wraps the github3 library to configure request retries."""

from cumulusci.core.exceptions import GithubException
from github3 import enterprise_login
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

retries = Retry(status_forcelist=(502, 503, 504), backoff_factor=0.3)
adapter = HTTPAdapter(max_retries=retries)


def get_github_api(username, password):
    gh = enterprise_login(url="https://git.uncc.edu",
                          username=username,
                          password=password)
    gh.session.mount("http://", adapter)
    gh.session.mount("https://", adapter)
    return gh


def validate_service(options):
    username = options["username"]
    password = options["password"]
    gh = get_github_api(username, password)
    try:
        gh.rate_limit()
    except Exception as e:
        raise GithubException(
            "Could not confirm access to the GitHub API: {}".format(str(e)))
Esempio n. 23
0
 def __init__(self, config):
     self.s = requests.session()
     self.config = config
     self.s.mount('http://', HTTPAdapter(max_retries=3))
     self.s.mount('https://', HTTPAdapter(max_retries=3))
Esempio n. 24
0
import os

import requests
from prometheus_client.core import (
    REGISTRY,
    CounterMetricFamily,
    GaugeMetricFamily,
    HistogramMetricFamily,
)
from prometheus_client import start_http_server

from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

retry_strategy = Retry(total=3,
                       backoff_factor=1,
                       status_forcelist=[429, 500, 502, 503, 504],
                       method_whitelist=["HEAD", "GET", "OPTIONS"])
adapter = HTTPAdapter(max_retries=retry_strategy)
session = requests.Session()
session.mount("https://", adapter)
session.mount("http://", adapter)

START = None

JENKINS_URL = os.environ['JENKINS_URL']  # Required
if 'JENKINS_USERNAME' in os.environ and 'JENKINS_TOKEN' in os.environ:
    JENKINS_USERNAME = os.environ['JENKINS_USERNAME']
    JENKINS_TOKEN = os.environ['JENKINS_TOKEN']
    AUTH = (JENKINS_USERNAME, JENKINS_TOKEN)
else:
    AUTH = None

DEFAULT_IGNORED = '00-all-enabled,01-all-disabled,all,All,My View'
Esempio n. 25
0
#ch.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
#ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
#logger.addHandler(ch)

 
logger.warning('Script is starting')

url = "http://172.16.10.11:80/api/refreshStates"
url_openhab = 'http://localhost:8080/rest/items/' #openhbitem etc
logger.debug('setting http adapter to ' + url)
fibaroLite_adapter = HTTPAdapter(max_retries=3)
session = requests.Session()
session.mount(url, fibaroLite_adapter)


#get the last ID to be next in line with the fibaro script
while True:
    logger.debug('Retrieving last_id')
            
    try:
        last_response = session.get(url, auth=('admin', 'admin'), timeout=31)
        
        #get the last ID to be next in line with the fibaro script
        last_response_content = json.loads(last_response.text)
        last_call_id = last_response_content['last']
        logger.debug('Last ID received: ' + str(last_call_id))
Esempio n. 26
0
def set_retry_strategy(prefix='https://', *args, **kwargs):
    'Enable a custom retry strategy.'
    requests.Session.__enter__ = lambda self: self.mount(
        prefix, HTTPAdapter(max_retries=Retry(*args, **kwargs))) or self
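
A short usage sketch for set_retry_strategy (the URL is a placeholder): after the call, every 'with requests.Session() as s:' block mounts the retrying adapter on entry, because the patched __enter__ returns the session itself via the 'or self' trick above:

set_retry_strategy('https://', total=3, backoff_factor=0.5)
with requests.Session() as s:
    r = s.get('https://example.com/')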
Esempio n. 27
0
def adapt_datetime(ts):
    return time.mktime(ts.timetuple())


def convert_datetime(ts):
    return datetime.datetime.fromtimestamp(ts)


sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter("DATETIME", convert_datetime)


requests_session = requests.Session()
retries = Retry(total=5, backoff_factor=5, status_forcelist=[500, 502, 503, 504])
requests_session.mount("https://", HTTPAdapter(max_retries=retries))
requests_session.mount("http://", HTTPAdapter(max_retries=retries))


get = functools.partial(requests_session.get, timeout=60)
post = functools.partial(requests_session.post, timeout=60)


class Pet(object):
    def __init__(self, site, site_name, pet_id, pet_name, pet_url, img_srcs):
        self.site = site
        self.site_name = site_name
        self.pet_id = pet_id
        self.pet_name = pet_name
        self.pet_url = pet_url
        self.img_srcs = img_srcs
Esempio n. 28
0
# We should consider keeping the timeout reasonably small and adding an
# environment variable as an option to configure larger timeouts if it turns
# out that much larger timeouts are necessary for slow connections.

requestTimeout = (3.1, 27)
putS3Timeout = 300
max_retries = int(os.environ.get('NOG_MAX_RETRIES', 5))
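
A hedged sketch of the environment-variable idea from the comment above, mirroring the NOG_MAX_RETRIES pattern; the variable name NOG_READ_TIMEOUT and the helper are hypothetical:

def _requestTimeout():
    # allow slow connections to raise the read timeout via the environment,
    # while keeping the 3.1 s connect timeout
    return (3.1, float(os.environ.get('NOG_READ_TIMEOUT', 27)))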


def _apiUrl():
    base = os.environ.get('NOG_API_URL', 'http://localhost:3000/api')
    return base + '/v1'


session = requests.Session()
session.mount(_apiUrl(), HTTPAdapter(max_retries=max_retries))


def _printmsg(level, msg):
    if level <= verbosity:
        print(msg)


class ErrataError(Exception):
    pass


class EtagError(Exception):
    pass

Esempio n. 29
0
def session():
    """Sets up a HTTP session with a retry policy."""
    s = requests.Session()
    retries = Retry(total=5, backoff_factor=0.5)
    s.mount("http://", HTTPAdapter(max_retries=retries))
    return s
Esempio n. 30
0
##### GLOBAL DATE AND TIME VARIABLES ##
dateString = ""
timeString = ""
hourString = ""
minuteString = ""
secondString = ""
lastFeedHour = 0                # Sets to hourString if we have fed this hour. If Hour and lastFeedHour don't match, dispense food!
lastPriceCheckHourNANO = 0      # Sets to hourString if we have queried the API for the latest Crypto Price
lastPriceCheckHourBTC = 0       # tracks last time queried API for latest BTC Price, same as above for NANO
#######################################
##### Session Requests ################
sessionConnectionSnapyIO = requests.session()            # Used by requests to keep the session rather than a new one every request
sessionConnectionBlockIO = requests.session()            # Used by requests for BlockIO API
sessionConnectionCharmant = requests.session()           # Used by requests for Charmant and current Market Prices
retry = Retry(connect=3, backoff_factor=.5)              # Incrementally sleeps until request works
transportAdapterSnapyIO = HTTPAdapter(max_retries=retry)    
transportAdapterBlockIO = HTTPAdapter(max_retries=retry)    # Setup Max Retries for Session Object
transportAdapterCharmant = HTTPAdapter(max_retries=retry)
sessionConnectionSnapyIO.mount(SnapyBalanceURL, transportAdapterSnapyIO)
sessionConnectionBlockIO.mount('https://block.io/', transportAdapterBlockIO)    # Use Transport Adapter for all endpoints that start with this URL
sessionConnectionCharmant.mount('https://api.coinmarketcap.com', transportAdapterCharmant)
#######################################
#######################################
#######################################
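
The three mounts above repeat one pattern; a hedged helper sketch (the name mounted_session is hypothetical) that builds a retrying session for a single endpoint prefix:

def mounted_session(prefix, connect_retries=3, backoff=.5):
    # one retrying session per API endpoint prefix
    s = requests.session()
    s.mount(prefix, HTTPAdapter(max_retries=Retry(connect=connect_retries,
                                                  backoff_factor=backoff)))
    return s

# e.g. sessionConnectionBlockIO = mounted_session('https://block.io/')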


def generateWallet():

    jsonParameter = {"password":"******"}
    r = requests.post(SnapyWalletURL, headers = headersNANO, json = jsonParameter)