Example #1
def main():
    for user in create_login_payload():
        logging.info('Opening a Grafana session...')
        session = Session()
        login(session, user)

        if check_initialized(session):
            logging.info('Grafana has already been initialized, skipping!')
            return

        logging.info('Attempting to add configured datasource...')
        r = session.post('{url}/api/datasources'.format(url=GRAFANA_URL),
                         json=create_datasource_payload())
        logging.debug('Response: %r', r.json())
        r.raise_for_status()

        for path in sorted(glob.glob('{dir}/*.json'.format(dir=DASHBOARDS_DIR))):
            logging.info('Creating dashboard from file: {path}'.format(path=path))
            r = session.post('{url}/api/dashboards/db'.format(url=GRAFANA_URL),
                             json=create_dashboard_payload(path))
            logging.debug('Response: %r', r.json())
            r.raise_for_status()

        logging.info('Ending %r session...', user.get('user'))
        session.get('{url}/logout'.format(url=GRAFANA_URL))

    logging.info('Finished successfully.')
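A minimal way to run this provisioning script, assuming the helper functions (create_login_payload, login, check_initialized, create_datasource_payload, create_dashboard_payload) and the GRAFANA_URL / DASHBOARDS_DIR constants are defined elsewhere in the same module, is a sketch like this:

if __name__ == '__main__':
    # basic logging setup so the logging.info()/debug() calls above are visible
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s')
    main()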
Example #2
    def call_api(self, operation_name, params=None):
        if not operation_name:
            return
        if params is None:
            params = {}

        api_session = Session()
        api_url = self._endpoint_url + operation_name
        req = Request('POST', api_url)
        prepped = req.prepare()
        self._encode_headers(prepped.headers)
        prepped.headers['Content-Type'] = 'application/json'
        prepped.headers['User-Agent'] = self._build_user_agent_header()
        self._signer.sign(prepped)

        # check if operation is for 'upload'
        if operation_name == 'upload':
            # get s3url for the upload and then do a upload
            resp = self._upload_file(params, prepped, api_session)
            return resp
        # prepare the body
        serializer = Serializer()
        serial_obj = serializer.serialize_to_request(params, None)
        prepped.prepare_body(serial_obj['body'], None)
        resp = api_session.send(prepped)
        return resp
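The prepare-and-send pattern used above (build a Request, prepare it, adjust headers, then push it through a Session) also works on its own; a minimal self-contained sketch against a hypothetical endpoint:

from requests import Request, Session

session = Session()
req = Request('POST', 'https://api.example.com/operation',   # hypothetical URL
              json={'key': 'value'})
prepped = session.prepare_request(req)   # merges session-level cookies and headers
prepped.headers['X-Trace-Id'] = 'debug-123'   # tweak the request before it goes out
resp = session.send(prepped, timeout=30)
print(resp.status_code)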
Example #3
def _send_sms(dest, message):
    request = requests.Request('POST',
        'http://api.infobip.com/api/v3/sendsms/json')

    request.headers = {'content-type': 'application/json'}
    recipients = []
    for rec in dest:
        recipients.append({'gsm': rec})

    json_obj = {
        'authentication': {
            'username': credential.LOGIN,  # TODO: make credentials global
            'password': credential.PASS
        },
        'messages': {
            'sender': 'Shoprite',
            'text': message,
            'recipients': recipients
        }  # close messages
    }  # close json_obj

    request.data = json.dumps(json_obj)

    # send the request
    s = Session()

    try:
        return s.send(request.prepare())
    except:
        return None
Example #4
    def __init__(self, env, id_correios, password,
                 cert=False, log_config=None, timeout=None):
        ''' Webservice initialization.

        Depending on the env get a different wsdl definition.
        New Correios SIGEP uses HTTPAuth to do requests.

        Args:
            env (str): Environment used to get the wsdl
            id_correios (str): IdCorreios given by correios website
            password (str): password vinculated to the IdCorreios
            log_config (dict): Dictionary configurations of logging
        '''

        ''' Untrusted ssl certificate for homolog envs see more at:

        https://www.ssllabs.com/ssltest/analyze.html?d=apphom.correios.com.br
        '''
        if cert is False:
            verify = False
        else:
            verify = certifi.where()

        self.timeout = timeout or 300

        if log_config is not None and isinstance(log_config, dict):
            """ Example config from zeep documentation:

            {
                'version': 1,
                'formatters': {
                    'verbose': {
                        'format': '%(name)s: %(message)s'
                    }
                },
                'handlers': {
                    'console': {
                        'level': 'DEBUG',
                        'class': 'logging.StreamHandler',
                        'formatter': 'verbose',
                    },
                },
                'loggers': {
                    'zeep.transports': {
                        'level': 'DEBUG',
                        'propagate': True,
                        'handlers': ['console'],
                    },
                }
            }
            """
            logging.config.dictConfig(log_config)

        session = Session()
        session.timeout = self.timeout
        session.verify = verify
        session.auth = (id_correios, password)

        t = Transport(session=session)
        self.client = Client(wsdl=self.get_env(env), transport=t)
Example #5
    def _upload_file(self, params, prepped, api_session):
        # header for upload
        s3_session = Session()
        api_url = self._endpoint_url + "getS3url"
        req = Request('POST', api_url)
        upload_prepped = req.prepare()
        self._encode_headers(upload_prepped.headers)
        upload_prepped.headers['Content-Type'] = 'application/json'
        upload_prepped.headers['User-Agent'] = self._build_user_agent_header()
        self._signer.sign(upload_prepped)
        # prepare params for s3 url
        url_parameters = {'fileName': '', 'tenant': ''}
        if 'fileName' in params and params['fileName']:
            url_parameters['fileName'] = params['fileName']
        elif 'fileLocation' in params and params['fileLocation']:
            if os.path.isfile(params['fileLocation']):
                fileName = os.path.basename(params['fileLocation'])
                url_parameters['fileName'] = fileName

        if 'tenant' in params and params['tenant']:
            url_parameters['tenant'] = params['tenant']
        # prepare the body
        serializer = Serializer()
        serial_obj = serializer.serialize_to_request(url_parameters, None)
        upload_prepped.prepare_body(serial_obj['body'], None)
        resp = s3_session.send(upload_prepped)
        resp = json.loads(json.dumps(resp.json()))

        # upload file to S3 bucket
        if 'url' in resp and 'fileLocation' in params and params['fileLocation']:
            put(resp['url'], data=open(params['fileLocation']).read())
            # build upload parameters
            upload_params = {'rowDelim': '', 'colDelim': '', 'headerFields': [],
                             'tenant': '', 'fileType': 0}
            # now do actual upload
            if 'tenant' in params and params['tenant']:
                upload_params['tenant'] = params['tenant']
            upload_params['fileLocation'] = params['fileLocation']
            if os.path.isfile(params['fileLocation']):
                fileName = os.path.basename(params['fileLocation'])
                upload_params['fileName'] = fileName
            if 'fileName' in params and params['fileName']:
                upload_params['fileName'] = params['fileName']
            if 'sourcePlatform' in params and params['sourcePlatform']:
                upload_params['sourcePlatform'] = params['sourcePlatform']
            if 'colDelim' in params and params['colDelim']:
                upload_params['colDelim'] = params['colDelim']
            if 'rowDelim' in params and params['rowDelim']:
                upload_params['rowDelim'] = params['rowDelim']
            if 'headerFields' in params and params['headerFields']:
                upload_params['headerFields'] = params['headerFields']
            if 'fileType' in params and params['fileType']:
                upload_params['fileType'] = params['fileType']
            # prepare the body
            serializer = Serializer()
            serial_obj = serializer.serialize_to_request(upload_params, None)
            prepped.prepare_body(serial_obj['body'], None)
            resp = api_session.send(prepped)
            resp = json.dumps(resp.json())
        return resp
Example #6
def r_next_page(url,plot):
	next_url = url
	data = {'scrollOffset':plot}
	session = Session()
	session.head('http://www.radionomy.com')
	response = session.post(
	url =url,
	data=data,
	headers=headers)
	plot = plot + 1
	match = re.compile('href="(.+?)" rel="internal"><img class="radioCover" src="(.+?)" alt="(.+?)" ').findall(str(response.text))
	for url,image,title in match:
		url = str(url).replace('/en/radio', 'http://listen.radionomy.com').replace('/index', '.m3u')
		h = HTMLParser.HTMLParser()
		try: title = h.unescape(title)
		except UnicodeDecodeError:
			continue
		image = image.replace('s67.jpg', 's400.jpg')
		try: add_directory3(title,url,140, defaultfanart ,image,plot='')
		except KeyError:
			continue
		xbmcplugin.setContent(pluginhandle, 'songs')
	add_directory2('Next Page>>', next_url, 133, defaultfanart, defaultimage, plot)
	xbmc.executebuiltin("Container.SetViewMode("+str(confluence_views[6])+")")
	xbmcplugin.endOfDirectory(addon_handle)
Example #7
def start_crawl(pages=2):
    session = Session()
    session.verify = False
    url = 'https://www.sgs.gov.cn/notice/search/ent_except_list'
    b = RoboBrowser(session=session)
    b.open(url)

    basic_info = pd.DataFrame(columns=['name', 'url', 'ID', 'date'])
    detail_info = pd.DataFrame()
    for i in range(pages):  # this number controls how many pages are crawled; the site caps it at 50 pages
        form = b.get_form(id='formInfo')
        if not form:
            continue
        form['condition.pageNo'].value = str(i + 1)  # set the form field that selects the page number
        form['condition.keyword'].value = ''
        try:  # dirty fix...
            b.submit_form(form)
            basic_info = basic_info.append(parse_table(b), ignore_index=True)
        except AttributeError:
            pass

    for url in basic_info['url']:
        detail = get_detail(url)
        if isinstance(detail, pd.DataFrame):
            detail_info = detail_info.append(detail, ignore_index=True)

    return basic_info, detail_info
Example #8
    def doRequest(self, method, url, params=None, parse=True, data=None):
        """ Send HTTP request, with given method,
            credentials and data to the given URL,
            and return the success and the result on success.
        """
        if not self.bitbucket.auth:
            raise ValueError("No auth credentials.")

        if data:
            data = dict(data)
        else:
            data = {}
        
        r = Request(
            method=method,
            url=url,
            auth=self.bitbucket.auth,
            params=params,
            data=data
        )
        s = Session()
        resp = s.send(r.prepare())
        status = resp.status_code
        text = resp.text
        error = resp.reason
        if status >= 200 and status < 300:
            if parse:
                return json.loads(text)
            else:
                return text
        else:
            raise exceptions.DispatchError(text, url, error, status)
Example #9
def get_scigraph_diff(scigraph_prod: str, scigraph_dev: str,
                      conf: dict, query_name: str) -> str:
    output_md = str()
    prod_results = get_scigraph_results(scigraph_prod, conf['query'])
    dev_results = get_scigraph_results(scigraph_dev, conf['query'])
    if prod_results == 'timeout' or dev_results == 'timeout':
        formatted_diff = {"request timeout": '0'}
    else:
        diff = diff_facets(dev_results, prod_results)
        formatted_diff = convert_diff_to_md(diff)

    output_md += "{}\n".format(add_md_header(query_name, 4))

    params = {
        'cypherQuery': conf['query']
    }

    sesh = Session()
    prod_req = sesh.prepare_request(Request('GET', scigraph_prod, params=params))
    dev_req = sesh.prepare_request(Request('GET', scigraph_dev, params=params))

    output_md += add_href(prod_req.url, "Production Query")
    output_md += '\n\n'
    output_md += add_href(dev_req.url, "Dev Query")
    output_md += '\n\n'

    diff_list = [(k, v) for k, v in formatted_diff.items()]
    diff_list.sort(key=lambda tup: int(re.search(r'\d+', tup[1]).group(0)), reverse=True)
    output_md += add_md_table(diff_list, conf['headers'])
    output_md += "\n\n"

    return output_md
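As above, a Session can also be used purely to build a fully encoded URL via prepare_request, without ever sending the request; a small sketch with a hypothetical endpoint:

from requests import Request, Session

sesh = Session()
req = Request('GET', 'https://example.org/cypher/execute',   # hypothetical endpoint
              params={'cypherQuery': 'MATCH (n) RETURN n LIMIT 1'})
prepared = sesh.prepare_request(req)
print(prepared.url)   # percent-encoded query string, ready to embed in a Markdown link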
Example #10
def getReferer(url, referer):
    useragent = (
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.14) Gecko/20080418 Ubuntu/7.10 (gutsy) Firefox/2.0.0.14"
    )
    session = Session()
    session.headers.update({"referer": referer, "user-agent": useragent})
    return session.get(url)
Example #11
    def _serializeIngredients(self):
        """Convert children ingredients in triples"""
        res = []

        session = Session()
        nutritionalInformations = {}
        ingredients = []
        for ingredient in self.ingredients:

            response = session.get(config.USDA_API.format(config.USDA_API_KEY, ingredient['food'])).json()

            ing = Ingredient(name=response.get('report').get('food').get('name'),
                             quantity=ingredient['quantity'],
                             nutrients=response.get('report').get('food').get('nutrients'))

            nutritionalInformations = self._calculateNutrients(ingredient=ing, data=nutritionalInformations)
            ingredients.append(ing)

        ingredientList = IngredientList(ingredients)
        res.append((self.uri, SFO.ingredients, ingredientList.getURI()))
        res.extend(ingredientList.serialize())


        res.extend(self._parseNutritionTable(nutritionalInformations, res))
        return res
Example #12
    def recognize_captcha(self, str_image_path):
        # Load the CAPTCHA image
        bin_captcha = open(str_image_path, 'rb').read()

        # Base64-encode the CAPTCHA image
        str_encode_file = base64.b64encode(bin_captcha).decode()

        # Specify the API URL
        str_url = "https://vision.googleapis.com/v1/images:annotate?key="

        # API key obtained beforehand
        str_api_key = ""

        # Set the Content-Type to JSON
        str_headers = {'Content-Type': 'application/json'}

        # Define the JSON payload according to the Cloud Vision API spec.
        # To extract text from the CAPTCHA image, type should be "TEXT_DETECTION".
        str_json_data = {
            'requests': [
                {
                    'image': {
                        'content': str_encode_file
                    },
                    'features': [
                        {
                            'type': "LABEL_DETECTION",
                            'maxResults': 10
                        }
                    ]
                }
            ]
        }

        # Send the request
        obj_session = Session()
        obj_request = Request("POST",
                              str_url + str_api_key,
                              data=json.dumps(str_json_data),
                              headers=str_headers
                              )
        obj_prepped = obj_session.prepare_request(obj_request)
        obj_response = obj_session.send(obj_prepped,
                                        verify=True,
                                        timeout=60
                                        )

        # Retrieve the analysis result
        if obj_response.status_code == 200:
            #logging
            logging.basicConfig(filename='example.log', level=logging.DEBUG)
            logging.debug(obj_response.text)
            # take only the first annotation
            obj = obj_response.text
            keywords = json.loads(obj)
            keyword = keywords['responses'][0]['labelAnnotations'][0]['description']
            print("Tag:" + keyword)
            return(keyword)
        else:
            return "error"
Example #13
    def _get_response(self, content_type, url, headers, file_descriptor):
        s = Session()
        response = None
        req = Request(content_type, url, headers=headers, data=file_descriptor)
        prepared = req.prepare()
        try:
            response = s.send(prepared)
        except exceptions.Timeout:
            raise
        except exceptions.TooManyRedirects:
            raise
        except exceptions.RequestException:
            raise
        if response.status_code != requests.codes.ok:
            try:
                raise BackblazeException(response.status_code,
                                         response.json()['message'],
                                         response,
                                         headers)
            except ValueError:
                raise BackblazeException(response.status_code,
                                         response.text,
                                         response,
                                         headers)
        return response
Example #14
class Site:
    def __init__(self, username, password):
        self.username = username
        self.password = password
        self.session = None

    def url(self):
        return "http://{}/collection/all".format(HOST)

    def login(self):
        self.session = Session()
        # drupal requires that you first GET the form
        r = self.session.get(self.url())
        # then POST to it
        s = self.session.post(
            self.url(), data={
                'name': self.username, 'pass': self.password,
                'form_id': 'user_login',
                'op': 'Log in',
            },
            headers={
                'referer': self.url(),
            }
        )
        print("=== logged in ===")
        return self.session

    def get_session(self):
        if self.session is not None:
            return self.session
        self.session = self.login()
        return self.session

    def get_collection_page(self, page):
        return CollectionPage(self.session, page)
Example #15
def get_response(url, **kwargs):
    header_info = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/44.0.2403.157 Safari/537.36'
    }
    if 'retries' in kwargs:
        retries = kwargs.pop('retries')
        kwargs['headers'] = header_info
    else:
        retries = 3
    if 'sess' in kwargs:
        sess = kwargs.pop('sess')
    else:
        sess = Session()
    if 'timeout' not in kwargs:
        kwargs['timeout'] = 10
    response = None
    try:
        response = sess.get(url, **kwargs)
    except Timeout, e:
        if retries > 0:
            kwargs['retries'] = retries - 1
            kwargs['sess'] = sess
            response = get_response(url, **kwargs)
        else:
            print e
    return response
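The hand-rolled retry recursion above can alternatively be replaced by urllib3's Retry mounted on the Session through an HTTPAdapter; this is a sketch of that alternative, not what the original author used:

from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

sess = Session()
retry = Retry(total=3, backoff_factor=0.5,
              status_forcelist=[500, 502, 503, 504])   # also retry on these status codes
adapter = HTTPAdapter(max_retries=retry)
sess.mount('http://', adapter)
sess.mount('https://', adapter)
response = sess.get('https://example.com/', timeout=10)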
Example #16
def get_service(hass, config):
    """ Get the NMA notification service. """

    if not validate_config(config,
                           {DOMAIN: [CONF_API_KEY]},
                           _LOGGER):
        return None

    try:
        # pylint: disable=unused-variable
        from requests import Session

    except ImportError:
        _LOGGER.exception(
            "Unable to import requests. "
            "Did you maybe not install the 'Requests' package?")

        return None

    nma = Session()
    response = nma.get(_RESOURCE + 'verify',
                       params={"apikey": config[DOMAIN][CONF_API_KEY]})
    tree = ET.fromstring(response.content)

    if tree[0].tag == 'error':
        _LOGGER.error("Wrong API key supplied. %s", tree[0].text)
    else:
        return NmaNotificationService(config[DOMAIN][CONF_API_KEY])
Example #17
    def login(self, username, password):
        s = Session()
        # login over bogi.ru
        params = {"login": username, "password": password}
        r1 = s.post(self.login_url, params, verify=False)
        # in case of failed login, bogi redirects to:
        # http://www.lostfilm.tv/blg.php?code=6&text=incorrect%20login/password
        if r1.request.url != self.login_url:
            url = urlparse(r1.url)
            if url.netloc == self.netloc:
                query = parse_qs(url.query)
                code = int(query.get('code', ['-1'])[0])
                text = query.get('text', "-")
                r1.encoding = 'windows-1251'
                message = r1.text
                raise LostFilmTVLoginFailedException(code, text, message)
            else:
                raise LostFilmTVLoginFailedException(-1, None, None)

        # callback to lostfilm.tv
        soup = BeautifulSoup(r1.text)
        inputs = soup.findAll("input")
        action = soup.find("form")['action']
        cparams = dict([(i['name'], i['value']) for i in inputs if 'value' in i.attrs])
        s.post(action, cparams, verify=False)

        # call to profile page
        r3 = s.get(self.profile_url)

        # read required params
        self.c_uid = s.cookies['uid']
        self.c_pass = s.cookies['pass']
        self.c_usess = self.search_usess_re.findall(r3.text)[0]
Example #18
    def send(self, request, params, file_path, m):
        dummy_response(m, request, params, file_path)

        session = Session()
        res = session.send(request.prepare())

        return res
Example #19
    def handle(self, *args, **options):
        s = Session()
        if len(args) < 2:
            print "Usage: replay_error <host:port> <portable request>"
            sys.exit(1)

            # self.request('GET', '{base_url}/{url}'.format(
            #     base_url=self.base_url.rstrip('/'),
            #     url=url.lstrip('/'),
            # ),
            # params=params or {},
            # headers=self.base_headers,
            # cookies={
            #     'token': self.token or '',
            # }

        portable = json.loads(base64.b64decode(args[1]))
        req = Request(
            portable['method'],
            'http://{host}{path}'.format(
                host=args[0],
                path=portable['path'],
            ),
            params=portable['get'],
            data=portable['post'],
            cookies=portable['cookies'],
        ).prepare()
        res = s.send(req)
        print res.content
Example #20
def get_all_setlists(artist, page_number, sets_per_page):
    headers = {'Accept': 'application/json'}
    url = "http://api.setlist.fm/rest/0.1/search/setlists?artistName={0}&p={1}".format(artist, page_number)
    session = Session()
    response = session.get(url, headers=headers)
    data = response.json()

    setlists = data['setlists']['setlist']
    total = data['setlists']['@total']
    total_pages = math.ceil(int(total) / sets_per_page)

    # Continue to make requests until max setlists are downloaded
    for page in range(page_number + 1, total_pages + 1):
        print('{0} Page {1}'.format(artist, page))
        url = "http://api.setlist.fm/rest/0.1/search/setlists?artistName={0}&p={1}".format(artist, page)
        response = session.get(url, headers=headers)
        data = response.json()

        # If more than one result, concatenate lists, else append element to list.
        if type(data['setlists']['setlist']) is list:
            setlists = setlists + data['setlists']['setlist']
        elif type(data['setlists']['setlist']) is dict:
            setlists.append(data['setlists']['setlist'])

    return setlists
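Illustrative call with a hypothetical artist name; the function walks every result page from page_number onwards and returns one flat list of setlists:

setlists = get_all_setlists('Radiohead', 1, 20)   # hypothetical arguments
print(len(setlists))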
Example #21
    def __send_request(self, url, data=None, method='GET', isKeyRequired=True):
        if isKeyRequired and not self._key:
            raise Exception('API Key is required.  Get the API key from http://rimuhosting.com/cp/apikeys.jsp.  Then export RIMUHOSTING_APIKEY=xxxx (the digits only) or add RIMUHOSTING_APIKEY=xxxx to a ~/.rimuhosting file.')
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        }
        if isKeyRequired:
            headers['Authorization']= "rimuhosting apikey=%s" % self._key
        
        url = urllib.parse.urljoin(self._base_url, url)

        data = data if isinstance(data, str) else json.dumps(data)

        s = Session()
        req = Request(method, url,
                      data=data,
                      headers=headers
                      )
        prepped = s.prepare_request(req)
        resp = s.send(prepped, timeout=3600)
        if not resp.ok:
            message = resp.text
            try: 
                j2 = resp.json()
                for val in j2:
                  if "error_info" in j2[val] and "human_readable_message" in j2[val]["error_info"]:
                      message = j2[val]["error_info"]["human_readable_message"]
                  break
            finally:
                raise Exception(resp.status_code, resp.reason, message)
        
        return resp
Example #22
def get_request(url, header):
    req = Request(method="GET", url=url, headers=header)
    req_prepared = req.prepare()
    res = Response()
    s = Session()
    res = s.send(req_prepared)
    return res
Example #23
def update_test(is_crash=0):
    config = merge_config()
    sess = Session()
    url = "{}/project/td".format(config['tbd_server'])
    post_params={
        'host_name': socket.gethostname(),
        'ip_addr': IP_ADDR,
        'tc_name': config['tc_name'],
        'test_client': config['monitor_client'],
        'build_verion': config['build_version'],
        'is_crash': is_crash,
        'ta_name': None,
        'tc_result': None,
        'ta_result': None,
    }
    resp = sess.post(url, data=post_params)
    log.debug("Post to {} with parameters {}".format(url, post_params))
    try:
        result = resp.json()
        if result['code']:
            log.error("update test data failed: {}!".format(result['result']))
        else:
            result = result['result']
            log.debug("update test data successfully: {}".format(result))
    except Exception as err:
        log.error("Failed to parse json result: {}!".format(err))
Example #24
    def get_url_page(self, url="http://www.optimalstackfacts.org/"):

        for l in xrange(3):
            # proxies_url = choice(self.proxies_url_list)
            proxies_url = "http://82.209.49.200:8080"

            proxies = {
                # "http": "http://*****:*****@93.127.146.106:80/",
                "http": proxies_url,
                "https": proxies_url

            }

            try:
                session = Session()
                r = session.get(url,  proxies=proxies, headers=self.headers, timeout=10)
                # r = requests.get(url,  proxies=proxies,)
                print r.status_code

                if r.status_code in [200, 301]:

                    page = r.content
                    r.cookies.clear()
                    r.close()

                    return page

                else:
                    r.cookies.clear()
                    r.close()
            except:
                pass
Example #25
def main4():
    # http://stackoverflow.com/questions/10247054/http-post-and-get-with-cookies-for-authentication-in-python
    # auth_url3 = "https://sso-platform.syncapse-staging.com/ajax_request/[email protected]&password=Testing."
    # response_url = "https://manage.syncapse-staging.com/posts/responses"
    s = requests.Session()
    r1 = s.get(auth_url3)
    print r1.headers

    # working
    headers = {"Cookie": r1.headers["set-cookie"] + " ; company_guid=6f65b34d-b6f4-4abd-b14a-408b8a11029b"}

    s1 = Session()
    prepped = Request(
        "GET",  # or any other method, 'POST', 'PUT', etc.
        response_url,
        # data=data,
        headers=headers
        # ...
    ).prepare()

    resp = s1.send(prepped)

    WriteToFile("/home/rtandon/Downloads/Work/importdata/TCCC_Response/Response_Time_Json_Sample3_main4.txt", resp.text)

    #    print resp.text
    print "done"
Example #26
    def validate(self):
        """Run validation using HTTP requests against validation host

        Using rules provided by spec, perform requests against validation host
        for each rule. The request response is verified to match the spec response
        rule. This will yield either a :py:class:`ValidationPass` or
        :py:class:`ValidationFail` response.
        """
        session = Session()
        if not self.verify and hasattr(urllib3, 'disable_warnings'):
            urllib3.disable_warnings()
        for rule in self.spec.get_rules():
            req = rule.get_request(self.host, self.port)
            if self.debug:
                pprint.pprint(req.__dict__)
            try:
                resp = session.send(req.prepare(), allow_redirects=False,
                                    verify=self.verify)
                if self.debug:
                    pprint.pprint(resp.__dict__)
                if rule.matches(resp):
                    yield ValidationPass(rule=rule, request=req, response=resp)
            except (ConnectionError, SSLError) as exc:
                # No response yet
                yield ValidationFail(rule=rule, request=req, response=None,
                                     error=exc)
            except ValidationError as exc:
                # Response received, validation error
                yield ValidationFail(rule=rule, request=req, response=resp,
                                     error=exc)
Example #27
def login(username, passwd, url=None):
	"""Login 

	>>> login('6102114000', '000000')  # doctest: +ELLIPSIS
	<requests.sessions.Session object at 0x...>


	"""
	sesion = Session()
	data = {}
	data['USERNAME'] = username
	data['PASSWORD'] = passwd
	data['useDogCode'] = ''
	data['x'] = 37
	data['y'] = 8
	ip = LOGIN_HOST [int(username)%3]
	try:
		res = sesion.post(ip % ('Logon.do?method=logon'), data=data, timeout=TIME_OUT)
		res = sesion.post(ip % ('Logon.do?method=logonBySSO'), timeout=TIME_OUT)
		if res.ok:
			return sesion
		else:
			return WRONG_USENAME
	except RequestException as error:
		return CONECTION_ERROR
Example #28
class AppStatsClient(object):
    limit = 100 # records
    interval = 60 # seconds
    timeout = 1 # timeout in seconds to submit data

    def __init__(self, url, app_id):
        self.url = url
        self.app_id = app_id
        self._acc = defaultdict(Counter)
        self._last_sent = time()
        # requests.Session() takes no constructor arguments; set the headers on
        # the session instead and pass the timeout per request
        self._session = Session()
        self._session.headers.update({'Content-Type': 'application/json'})
        self._req_count = 0

    def add(self, name, counts):
        with lock:
            self._acc[name]['NUMBER'] += 1
            for counter in counts:
                self._acc[name][counter] += counts[counter]
            self._req_count += 1

            elapsed = time() - self._last_sent
            if elapsed >= self.interval or self._req_count >= self.limit:
                self.submit()

    def submit(self):
        data = json.dumps({self.app_id: self._acc})
        try:
            self._session.post(self.url, data=data, timeout=self.timeout)
        except RequestException, e:
            log.debug('Error during data submission: %s' % e)
        else:
Example #29
def get_http_client():
    '''
    '''
    client = Session()
    client.trust_env = False # https://github.com/kennethreitz/requests/issues/2066
    
    return client
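Usage sketch: with trust_env disabled, the session ignores proxy environment variables (HTTP_PROXY/HTTPS_PROXY) and .netrc credentials, which is what the linked issue is about:

client = get_http_client()
resp = client.get('https://example.com/')   # goes out directly, bypassing any proxy configured in the environment
print(resp.status_code)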
Example #30
def main():
    session = Session()
    marketReq = showOrderbookCompactRequest()
    ratesReq = showRatesRequest()

    market = Market(session.send(marketReq).json())
    rates = session.send(ratesReq).json()
    course = Course(rates)
    print "---------------------------------------------"
    print "\t[*] Asks [*]"
    print "\tvolume: %f" % market.get_BidVolume()
    print "\tmedian price: %f" % market.get_BidMedian()
    print "\tavg price: %f" % market.get_BidAverage()
    print "\tcheapest: %f" % market.get_BidLowest()
    print "\tmost expensive: %f" % market.get_BidHighest()
    print "---------------------------------------------"
    print "\t[*] Bid [*]"
    print "\tvolume: %f" % market.get_AskVolume()
    print "\tmedian price: %f" % market.get_AskMedian()
    print "\tavg price: %f" % market.get_AskAverage()
    print "\thighest bid: %f" % market.get_AskHighest()
    print "\tlowest bid: %f" % market.get_AskLowest()
    print "---------------------------------------------"
    print "\t[*] courses [*]"
    print "\texact: %f" % course.getCurrentRate()
    print "\t12 Hours: %f" % course.get12hWeighted()
    print "\t3 hours: %f" % course.get3hWeighted()
Example #31
def main():
    """Main function"""

    # Read configuration
    with open("config.yml", "r") as ymlfile:
        cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)

    host = cfg["deepsecurity"]["server"]
    tenant = cfg["deepsecurity"].get("tenant", None)
    c1_api_key = cfg["deepsecurity"]["c1_api_key"]
    ws_api_key = cfg["deepsecurity"]["ws_api_key"]
    timespan_from = cfg["deepsecurity"]["timespan_from"]
    timespan_to = cfg["deepsecurity"]["timespan_to"]
    tls_verify = bool(cfg["deepsecurity"].get("tls_verify", True))

    if tls_verify == False:
        _LOGGER.info("Disabling TLS verify")
        ssl._create_default_https_context = ssl._create_unverified_context
        urllib3.disable_warnings()

    _LOGGER.info("Retrieving computers...")
    computers = get_paged_computers(ws_api_key, host, verify=tls_verify)

    computers_info = add_computer_info(ws_api_key,
                                       host,
                                       computers,
                                       verify=tls_verify)
    indexed_computers = get_indexed(data=computers_info, index="id")

    session = Session()
    session.verify = tls_verify
    transport = Transport(session=session, timeout=1800)
    url = "https://{0}/webservice/Manager?WSDL".format(host)
    client = Client(url, transport=transport)
    factory = client.type_factory("ns0")

    ###
    # Scheduled Scans
    # Jobs started the day before or finished after midnight are not
    # fully handled
    ###
    _LOGGER.info("Retrieving system events 'Scheduled Malware Scan Started'")
    sys_events = get_sys_events(
        client,
        factory,
        timespan_from,
        timespan_to,
        tenant,
        "Authorization:ApiKey",
        c1_api_key,
        indexed_computers,
        event_id=SCHEDULED_MALWARE_SCAN_STARTED,
    )
    _LOGGER.info("Retrieving system events 'Scheduled Malware Scan Completed'")
    sys_events = sys_events + get_sys_events(
        client,
        factory,
        timespan_from,
        timespan_to,
        tenant,
        "Authorization:ApiKey",
        c1_api_key,
        indexed_computers,
        event_id=SCHEDULED_MALWARE_SCAN_COMPLETED,
    )
    _LOGGER.info("Sort mixed events by 'systemEventID'")
    sys_events.sort(key=getSystemEventID)

    targetIDs = set()
    scanCount = 0
    for scan in sys_events:
        if scan["eventID"] == SCHEDULED_MALWARE_SCAN_STARTED:
            if scan["targetID"] in targetIDs:
                _LOGGER.info("Scan already active for %s", scan["targetID"])
            else:
                scanCount += 1
                targetIDs.add(scan["targetID"])
        if scan["eventID"] == SCHEDULED_MALWARE_SCAN_COMPLETED:
            if not scan["targetID"] in targetIDs:
                _LOGGER.info("No active scan found for %s", scan["targetID"])
            else:
                targetIDs.remove(scan["targetID"])

    if len(targetIDs) == 0:
        _LOGGER.info("All scans finished")
    else:
        _LOGGER.info("%d scans running", len(targetIDs))

    ###
    # Anti Malware Findings (Within scheduled scans only)
    ###
    _LOGGER.info("Retrieving anti malware events for %s scans", SCAN_TYPE)
    am_events = get_am_events(
        client,
        factory,
        timespan_from,
        timespan_to,
        tenant,
        "Authorization:ApiKey",
        c1_api_key,
        indexed_computers,
        scan_type=SCAN_TYPE,
    )

    ###
    # Creating Result Sets
    ###
    _LOGGER.info("Calculating result sets")
    # Create result set: {malware, {computers}}
    am_result = {}
    for am_event in am_events:
        if am_event["malwareName"] in am_result:
            ll = am_result.get(am_event["malwareName"])
        else:
            ll = set()
        ll.add(indexed_computers[am_event["hostID"]]["name"])
        am_result[am_event["malwareName"]] = ll

    # Create result set: {malware, str(computers)}
    am_result_list = {}
    for result in am_result:
        am_result_list[result] = ", ".join("{}".format(val)
                                           for (val) in am_result[result])

    # Populate statistics
    am_result_stats = {}
    am_result_stats["computers"] = len(computers)
    am_result_stats["scan_count"] = scanCount
    am_result_stats["scans_running"] = len(targetIDs)
    am_result_stats["no_malwares_variants"] = len(am_result)

    # Create result set: {malware, #computers}
    malware_computer_count = {}
    for am_event in am_events:
        if am_event["malwareName"] in malware_computer_count:
            ll = malware_computer_count.get(am_event["malwareName"])
        else:
            ll = set()
        ll.add(indexed_computers[am_event["hostID"]]["name"])
        malware_computer_count[am_event["malwareName"]] = ll
    for malware in malware_computer_count:
        malware_computer_count[malware] = len(malware_computer_count[malware])

    ###
    # Create Excel Report
    ###
    _LOGGER.info("Create Excel report")
    # Create a Pandas dataframe from the data.
    df = pd.DataFrame([malware_computer_count], index=["Variant Distribution"])

    # Create a Pandas Excel writer using XlsxWriter as the engine.
    excel_file = "pie.xlsx"
    sheet_name = "Variant Distribution"

    writer = pd.ExcelWriter(excel_file, engine="xlsxwriter")
    workbook = writer.book
    worksheet = workbook.add_worksheet(sheet_name)
    writer.sheets[sheet_name] = worksheet

    # Format cells.
    format_malware = workbook.add_format()  # ({'num_format': '0%'})
    format_malware.set_align("left")
    worksheet.set_column("A:A", 24, format_malware)

    format_columns = workbook.add_format()
    format_columns.set_align("right")
    worksheet.set_column("B:B", 77, format_columns)

    df.to_excel(writer, sheet_name=sheet_name, startrow=0, startcol=26)

    # Create a chart object.
    chart = workbook.add_chart({"type": "pie"})
    chart.set_size({"width": 720, "height": 576})
    chart.set_style(10)
    chart.set_title({"name": "Malware Variants"})

    # Configure the chart from the dataframe data.
    chart.add_series({
        "categories":
        "='{}'!AB1:A{}1".format(
            sheet_name, chr(65 + am_result_stats["no_malwares_variants"])),
        "values":
        "='{}'!AB2:A{}2".format(
            sheet_name, chr(65 + am_result_stats["no_malwares_variants"])),
    })

    # Insert the chart into the worksheet.
    worksheet.insert_chart("A1", chart)

    # Insert statistics table
    ds = pd.Series(data=am_result_stats)
    ds.to_excel(writer,
                sheet_name=sheet_name,
                startrow=30,
                startcol=0,
                header=False)

    # Insert malware variant discovered on computer table
    ds = pd.Series(data=am_result_list, dtype='object')
    ds.to_excel(writer,
                sheet_name=sheet_name,
                startrow=35,
                startcol=0,
                header=False)

    # Close the Pandas Excel writer and output the Excel file.
    writer.save()
Example #32
def use_wrapper():
    print("Using helper")
    sess = CacheControl(Session())
    return sess
Example #33

if __name__ == '__main__':
    # URL of the YouTube video to download
    # todo: move this into a settings.py-style config later and parse the cmd arguments here with Python's command-line parsing package
    # url = 'https://www.youtube.com/watch?v=Vyp5_F42NGs'
    filepath = '/Users/zhangyue/Downloads'
    # proxy = 'socks5://127.0.0.1:1080'
    proxy = None
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/60.0.3112.90 Safari/537.36',
        'connection': 'keep-alive',
    }
    session = Session()
    session.headers = headers
    if proxy:
        proxies = {
            'http': proxy,
            'https': proxy,
        }
        session.proxies = proxies

    urls_handler(session)

    urls_list = []
    with open('handled_url.txt') as f:
        for url in f:
            if url:
                urls_list.append(url)
Example #34
    def initialize(self):
        self.session = Session()
        self.search_url = self.get_sub_domain()
        self.token = None
        self.login()
Example #35
class BSPlayerProvider(Provider):
    """BSPlayer Provider."""
    languages = {Language('por', 'BR')} | {
        Language(l)
        for l in [
            'ara', 'bul', 'ces', 'dan', 'deu', 'ell', 'eng', 'fin', 'fra',
            'hun', 'ita', 'jpn', 'kor', 'nld', 'pol', 'por', 'ron', 'rus',
            'spa', 'swe', 'tur', 'ukr', 'zho'
        ]
    }
    SEARCH_THROTTLE = 8
    hash_verifiable = True

    # blatantly based on kodi's bsplayer plugin
    # also took from BSPlayer-Subtitles-Downloader
    def __init__(self):
        self.initialize()

    def initialize(self):
        self.session = Session()
        self.search_url = self.get_sub_domain()
        self.token = None
        self.login()

    def terminate(self):
        self.session.close()
        self.logout()

    def api_request(self, func_name='logIn', params='', tries=5):
        headers = {
            'User-Agent':
            'BSPlayer/2.x (1022.12360)',
            'Content-Type':
            'text/xml; charset=utf-8',
            'Connection':
            'close',
            'SOAPAction':
            '"http://api.bsplayer-subtitles.com/v1.php#{func_name}"'.format(
                func_name=func_name)
        }
        data = (
            '<?xml version="1.0" encoding="UTF-8"?>\n'
            '<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" '
            'xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" '
            'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" '
            'xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:ns1="{search_url}">'
            '<SOAP-ENV:Body SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
            '<ns1:{func_name}>{params}</ns1:{func_name}></SOAP-ENV:Body></SOAP-ENV:Envelope>'
        ).format(search_url=self.search_url,
                 func_name=func_name,
                 params=params)
        logger.info('Sending request: %s.' % func_name)
        for i in iter(range(tries)):
            try:
                self.session.headers.update(headers.items())
                res = self.session.post(self.search_url, data)
                return ElementTree.fromstring(res.text)

            except Exception as ex:
                logger.info("ERROR: %s." % ex)
                if func_name == 'logIn':
                    self.search_url = self.get_sub_domain()

                sleep(1)
        logger.info('ERROR: Too many tries (%d)...' % tries)
        raise Exception('Too many tries...')

    def login(self):
        # If already logged in
        if self.token:
            return True

        root = self.api_request(func_name='logIn',
                                params=('<username></username>'
                                        '<password></password>'
                                        '<AppID>BSPlayer v2.67</AppID>'))
        res = root.find('.//return')
        if res.find('status').text == 'OK':
            self.token = res.find('data').text
            logger.info("Logged In Successfully.")
            return True
        return False

    def logout(self):
        # If already logged out / not logged in
        if not self.token:
            return True

        root = self.api_request(
            func_name='logOut',
            params='<handle>{token}</handle>'.format(token=self.token))
        res = root.find('.//return')
        self.token = None
        if res.find('status').text == 'OK':
            logger.info("Logged Out Successfully.")
            return True
        return False

    def query(self, video, video_hash, language):
        if not self.login():
            return []

        if isinstance(language, (tuple, list, set)):
            # language_ids = ",".join(language)
            # language_ids = 'spa'
            language_ids = ','.join(sorted(l.opensubtitles for l in language))

        if video.imdb_id is None:
            imdbId = '*'
        else:
            imdbId = video.imdb_id
        sleep(self.SEARCH_THROTTLE)
        root = self.api_request(
            func_name='searchSubtitles',
            params=('<handle>{token}</handle>'
                    '<movieHash>{movie_hash}</movieHash>'
                    '<movieSize>{movie_size}</movieSize>'
                    '<languageId>{language_ids}</languageId>'
                    '<imdbId>{imdbId}</imdbId>').format(
                        token=self.token,
                        movie_hash=video_hash,
                        movie_size=video.size,
                        language_ids=language_ids,
                        imdbId=imdbId))
        res = root.find('.//return/result')
        if res.find('status').text != 'OK':
            return []

        items = root.findall('.//return/data/item')
        subtitles = []
        if items:
            logger.info("Subtitles Found.")
            for item in items:
                subID = item.find('subID').text
                subDownloadLink = item.find('subDownloadLink').text
                subLang = Language.fromopensubtitles(item.find('subLang').text)
                subName = item.find('subName').text
                subFormat = item.find('subFormat').text
                subtitles.append(
                    BSPlayerSubtitle(subLang, subName, subFormat, video,
                                     subDownloadLink))
        return subtitles

    def list_subtitles(self, video, languages):
        return self.query(video, video.hashes['bsplayer'], languages)

    def get_sub_domain(self):
        # s1-9, s101-109
        SUB_DOMAINS = [
            's1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9', 's101',
            's102', 's103', 's104', 's105', 's106', 's107', 's108', 's109'
        ]
        API_URL_TEMPLATE = "http://{sub_domain}.api.bsplayer-subtitles.com/v1.php"
        sub_domains_end = len(SUB_DOMAINS) - 1
        return API_URL_TEMPLATE.format(
            sub_domain=SUB_DOMAINS[random.randint(0, sub_domains_end)])

    def download_subtitle(self, subtitle):
        session = Session()
        _addheaders = {'User-Agent': 'Mozilla/4.0 (compatible; Synapse)'}
        session.headers.update(_addheaders)
        res = session.get(subtitle.page_link)
        if res:
            if res.text == '500':
                raise ValueError('Error 500 on server')

            with gzip.GzipFile(fileobj=io.BytesIO(res.content)) as gf:
                subtitle.content = gf.read()
                subtitle.normalize()

            return subtitle
        raise ValueError('Problems connecting to the server')
Example #36

from zeep import Client
from requests import Session
from zeep.transports import Transport
from getpass import getpass

#Parameters
BAMAddress = "bam.lab.corp"
url = "https://" + BAMAddress + "/Services/API?wsdl"
account = "api"
account_password = getpass("Enter Password: ")
websession = Session()
websession.verify = "bam.crt"
webtransport = Transport(session=websession)
client = Client(url, transport=webtransport)

searchTerm = "192.168.2"

#login to api session
client.service.login(account, account_password)

# API calls
searchresults = client.service.searchByObjectTypes(searchTerm,"IP4Address", \
                                                    0, 100)
for items in searchresults:
    if "GATEWAY" in items.properties:
Example #37
    def __init__(self, service):
        self.service = service
        Session.__init__(self)
Example #38
def use_adapter():
    print("Using adapter")
    sess = Session()
    sess.mount("http://", CacheControlAdapter())
    return sess
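Either form (the CacheControl wrapper in Example #32 or the adapter mount above) yields an ordinary requests Session whose responses can be served from the cache; a short usage sketch:

sess = use_adapter()
first = sess.get('http://example.com/')    # hits the network
second = sess.get('http://example.com/')   # may be answered from the in-memory cache, subject to Cache-Control headers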
Example #39
    def __init__(self):
        self.session = Session()
Example #40
class API:
    _ID = None  # First line
    _SECRET = None  # Second line
    _TOKEN_URL = "https://accounts.spotify.com/api/token"
    _SEARCH_URL = "https://api.spotify.com/v1/search"
    _FEATURES_URL = "https://api.spotify.com/v1/audio-features/"
    _ANALYSIS_URL = "https://api.spotify.com/v1/audio-analysis/"

    _MAX_IDS = 100

    def __init__(self):
        self.session = Session()

    def __list_split(self, split: list, n: int) -> list:
        """
        Split the given list every nth element
        Args:
            split: the list to split
            n: the size of splitted lists

        Returns:
            The lists splitted
        """
        for i in range(0, len(split), n):
            # Create an index range for l of n items:
            yield split[i:i + n]

    def __ids_assembler(self, tracks: List[Song]) -> str:
        """
        Given a list of Song Objects, extract spotify ids and assemble them in a string
        Args:
            tracks: a list of Song Objects

        Returns:
            A string with every element of the list separated with a comma
        """
        out = ""
        for track in tracks:
            out += f"{track['spotify_id']},"
        return out[:-1]  # removes the trailing comma

    def key_parse(self, keyfile: str) -> None:
        """
        Load the client id and the client secret from the .key file
        Returns:
            None
        """
        logging.debug(f"Parsing {keyfile}")
        with open(keyfile, "r") as keyfile:
            lines = keyfile.readlines()

            # File Sanitizing
            if len(lines) != 2:
                logging.warning(f"{keyfile} should have 2 lines")

            for _, line in zip(range(2), lines):
                if len(line) != 33:  # 32 + \n
                    logging.critical(
                        "Client id and client secret must have 32 chars")
                    raise ValueError(
                        "Client id and client secret must have 32 chars")

            self._ID = lines[0].replace("\n", "")
            self._SECRET = lines[1].replace("\n", "")

    def auth(self, keyfile: str = None) -> "self":
        """
        Get an oauth token from the Spotify Web API
        Returns:
            self
        """
        if keyfile is not None:
            self.key_parse(keyfile)

        if self._ID is None or self._SECRET is None:
            logging.error("Client ID and Client Secret cannot be None")
            raise ValueError("Client ID and Client Secret cannot be None")

        auth_str = bytes(f"{self._ID}:{self._SECRET}", 'utf-8')
        auth_b64 = base64.b64encode(auth_str).decode('utf-8')
        headers = {"Authorization": f"Basic {auth_b64}"}
        body = {"grant_type": "client_credentials"}

        response = post(url=self._TOKEN_URL, headers=headers, data=body)
        data = response.json()

        token_header = {"Authorization": f"Bearer {data['access_token']}"}

        self.session.headers = token_header

        return self

    def search(self, track: Song, isrc: bool = False) -> None:
        """
        Search a song using the Spotify Web API
        Args:
            track: the Song Object representing the track to search
            isrc: use or not the isrc to search the song on spotify

        Returns:
            None
        """
        if isrc and track['isrc'] is not None:
            query = f"isrc:{track['isrc']}&type=track"
        else:
            query = f"{track['title']}%20artist:{track['artist']}%20year:{track['year']}&type=track"
        url = f"{self._SEARCH_URL}?q={query}"

        response = self.session.get(url=url)
        if response.status_code != 200:
            logging.warning(
                f"Search request failed. Status = {response.status_code} - Url = {url} - Response = "
                f"{response.content}")
            raise ValueError(f"Search request failed.")

        spotify_id = response.json()["tracks"]["items"][0]["id"]

        if spotify_id is None or len(spotify_id) != 22:
            logging.critical(
                f"id is wrong for {track['title']} - {track['artist']}")

        track["spotify_id"] = spotify_id

    def feature_bulk(self, tracks: List[Song]) -> None:
        """
        Get the song features for a list of Song Objects
        Args:
            tracks: a list of Song Objects

        Returns:
            None
        """
        tracks_chunks = self.__list_split(tracks, self._MAX_IDS)

        for chunk in tracks_chunks:
            self.features(chunk)

    def features(self, tracks: List[Song]) -> None:
        """
        Get the song features for a list of ids with a max size of 100 tracks
        Args:
            tracks: a list of Song Objects with at maximum 100 elements

        Returns:
            None
        """
        path = self.__ids_assembler(tracks)
        url = f"{self._FEATURES_URL}?ids={path}"

        response = self.session.get(url=url)
        if response.status_code != 200:
            logging.warning(
                f"Features request failed. Status = {response.status_code} - Url = {url} - Response = "
                f"{response.content}")
            raise ValueError(f"Feature request failed.")

        data = response.json()["audio_features"]

        for i, track in enumerate(tracks):
            feature = data[i]

            if feature is None:
                # Remove songs whose features were not found
                tracks.remove(track)
                data.pop(i)
                # log it
                logging.error(
                    f"Features for {track['title']} - {track['artist']} not found"
                )
                continue

            track.set_features(feature)
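A hypothetical end-to-end use of this wrapper, assuming Song objects from the surrounding project that expose 'title', 'artist', 'year' and 'isrc' keys:

api = API()
api.auth("spotify.key")        # two-line key file: client id, then client secret
for track in tracks:           # tracks: List[Song] built elsewhere
    api.search(track)          # fills track["spotify_id"]
api.feature_bulk(tracks)       # attaches audio features, 100 ids per request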
Example #41
def site_session():

    s = Session()
    s.get(ENTRY_URL)
    return s
Example #42
# =========================================================================================

from requests import Session, get
from bs4 import BeautifulSoup
from json import loads, load, dumps
from sys import argv
from os import chdir
from os.path import dirname, abspath
from re import findall

# Needed so the script can be run from outside its own folder
# (switches the working directory to the script's directory so the config is found correctly)
chdir(dirname(abspath(__file__)))

# Initialize the web client
session = Session()
headers = {
    'Connection':
    'keep-alive',
    'User-Agent':
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}
domain = 'https://my.yota.ru'
url_tariff_page = 'https://my.yota.ru/selfcare/devices'
access_page = 'http://hello.yota.ru/light/'


# Function that auto-renews the free period
def yota_get_acces():
    log('blocking_check')
    try:
Example #43
class ItaSAProvider(Provider):
    languages = {Language('ita')}

    video_types = (Episode, )

    server_url = 'https://api.italiansubs.net/api/rest/'

    apikey = 'd86ad6ec041b334fac1e512174ee04d5'

    def __init__(self, username=None, password=None):
        if username is not None and password is None or username is None and password is not None:
            raise ConfigurationError('Username and password must be specified')

        self.username = username
        self.password = password
        self.logged_in = False
        self.login_itasa = False
        self.session = None
        self.auth_code = None

    def initialize(self):
        self.session = Session()
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __version__

        # login
        if self.username is not None and self.password is not None:
            logger.info('Logging in')
            params = {
                'username': self.username,
                'password': self.password,
                'apikey': self.apikey
            }

            r = self.session.get(self.server_url + 'users/login',
                                 params=params,
                                 allow_redirects=False,
                                 timeout=10)
            root = etree.fromstring(r.content)

            if root.find('status').text == 'fail':
                raise AuthenticationError(root.find('error/message').text)

            self.auth_code = root.find('data/user/authcode').text

            data = {
                'username': self.username,
                'passwd': self.password,
                'remember': 'yes',
                'option': 'com_user',
                'task': 'login',
                'silent': 'true'
            }
            r = self.session.post('http://www.italiansubs.net/index.php',
                                  data=data,
                                  allow_redirects=False,
                                  timeout=30)
            r.raise_for_status()

            self.logged_in = True

    def terminate(self):
        self.session.close()
        self.logged_in = False

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _get_show_ids(self):
        """Get the ``dict`` of show ids per series by querying the `shows` page.

        :return: show id per series, lower case and without quotes.
        :rtype: dict

        """
        # get the show page
        logger.info('Getting show ids')
        params = {'apikey': self.apikey}
        r = self.session.get(self.server_url + 'shows',
                             timeout=10,
                             params=params)
        r.raise_for_status()
        root = etree.fromstring(r.content)

        # populate the show ids
        show_ids = {}
        for show in root.findall('data/shows/show'):
            if show.find('name').text is None:  # pragma: no cover
                continue
            show_ids[sanitize(show.find('name').text).lower()] = int(
                show.find('id').text)
        logger.debug('Found %d show ids', len(show_ids))

        return show_ids

    @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
    def _search_show_id(self, series):
        """Search the show id from the `series`

        :param str series: series of the episode.
        :return: the show id, if found.
        :rtype: int or None

        """
        # build the param
        params = {'apikey': self.apikey, 'q': series}

        # make the search
        logger.info('Searching show ids with %r', params)
        r = self.session.get(self.server_url + 'shows/search',
                             params=params,
                             timeout=10)
        r.raise_for_status()
        root = etree.fromstring(r.content)

        if int(root.find('data/count').text) == 0:
            logger.warning('Show id not found: no suggestion')
            return None

        # Looking for show in first page
        for show in root.findall('data/shows/show'):
            if sanitize(show.find('name').text).lower() == sanitize(
                    series.lower()):
                show_id = int(show.find('id').text)
                logger.debug('Found show id %d', show_id)

                return show_id

        # Not in the first page of results, try the next page (if any)
        next_page = root.find('data/next')
        while next_page.text is not None:  # pragma: no cover

            r = self.session.get(next_page.text, timeout=10)
            r.raise_for_status()
            root = etree.fromstring(r.content)

            logger.info('Loading suggestion page %r',
                        root.find('data/page').text)

            # Looking for show in following pages
            for show in root.findall('data/shows/show'):
                if sanitize(show.find('name').text).lower() == sanitize(
                        series.lower()):
                    show_id = int(show.find('id').text)
                    logger.debug('Found show id %d', show_id)

                    return show_id

            next_page = root.find('data/next')

        # No matches found
        logger.warning('Show id not found: suggestions do not match')

        return None

    def get_show_id(self, series, country_code=None):
        """Get the best matching show id for `series`.

        First search in the result of :meth:`_get_show_ids` and fallback on a search with :meth:`_search_show_id`

        :param str series: series of the episode.
        :param str country_code: the country in which the show is aired.
        :return: the show id, if found.
        :rtype: int or None

        """
        series_sanitized = sanitize(series).lower()
        show_ids = self._get_show_ids()
        show_id = None

        # attempt with country
        if not show_id and country_code:
            logger.debug('Getting show id with country')
            show_id = show_ids.get('%s %s' %
                                   (series_sanitized, country_code.lower()))

        # attempt clean
        if not show_id:
            logger.debug('Getting show id')
            show_id = show_ids.get(series_sanitized)

        # search as last resort
        if not show_id:
            logger.warning('Series not found in show ids')
            show_id = self._search_show_id(series)

        return show_id

    def _download_zip(self, sub_id):
        # download the subtitle
        logger.info('Downloading subtitle %r', sub_id)

        params = {
            'authcode': self.auth_code,
            'apikey': self.apikey,
            'subtitle_id': sub_id
        }

        r = self.session.get(self.server_url + 'subtitles/download',
                             params=params,
                             timeout=30)
        r.raise_for_status()

        return r.content

    def _get_season_subtitles(self, show_id, season, sub_format):
        params = {
            'apikey': self.apikey,
            'show_id': show_id,
            'q': 'Stagione %d' % season,
            'version': sub_format
        }
        r = self.session.get(self.server_url + 'subtitles/search',
                             params=params,
                             timeout=30)
        r.raise_for_status()
        root = etree.fromstring(r.content)

        if int(root.find('data/count').text) == 0:
            logger.warning('Subtitles for season not found')
            return []

        subs = []
        # Looking for subtitles in first page
        for subtitle in root.findall('data/subtitles/subtitle'):
            if 'stagione %d' % season in subtitle.find('name').text.lower():
                logger.debug('Found season zip id %d - %r - %r',
                             int(subtitle.find('id').text),
                             subtitle.find('name').text,
                             subtitle.find('version').text)

                content = self._download_zip(int(subtitle.find('id').text))
                if not is_zipfile(io.BytesIO(content)):  # pragma: no cover
                    if b'limite di download' in content:
                        raise TooManyRequests()
                    else:
                        raise ConfigurationError('Not a zip file: %r' %
                                                 content)

                with ZipFile(io.BytesIO(content)) as zf:
                    episode_re = re.compile(r's(\d{1,2})e(\d{1,2})')
                    for index, name in enumerate(zf.namelist()):
                        match = episode_re.search(name)
                        if not match:  # pragma: no cover
                            logger.debug('Cannot decode subtitle %r', name)
                        else:
                            sub = ItaSASubtitle(
                                int(subtitle.find('id').text),
                                subtitle.find('show_name').text,
                                int(match.group(1)), int(match.group(2)), None,
                                None, None, name)
                            sub.content = fix_line_ending(zf.read(name))
                            subs.append(sub)

        return subs

    def query(self,
              series,
              season,
              episode,
              video_format,
              resolution,
              country=None):

        # To make queries you need to be logged in
        if not self.logged_in:  # pragma: no cover
            raise ConfigurationError('Cannot query if not logged in')

        # get the show id
        show_id = self.get_show_id(series, country)
        if show_id is None:
            logger.error('No show id found for %r ', series)
            return []

        # get the page of the season of the show
        logger.info(
            'Getting the subtitle of show id %d, season %d episode %d, format %r',
            show_id, season, episode, video_format)
        subtitles = []

        # Default format is SDTV
        if not video_format or video_format.lower() == 'hdtv':
            if resolution in ('1080i', '1080p', '720p'):
                sub_format = resolution
            else:
                sub_format = 'normale'
        else:
            sub_format = video_format.lower()

        # Look for year
        params = {'apikey': self.apikey}
        r = self.session.get(self.server_url + 'shows/' + str(show_id),
                             params=params,
                             timeout=30)
        r.raise_for_status()
        root = etree.fromstring(r.content)

        year = root.find('data/show/started').text
        if year:
            year = int(year.split('-', 1)[0])
        tvdb_id = root.find('data/show/id_tvdb').text
        if tvdb_id:
            tvdb_id = int(tvdb_id)

        params = {
            'apikey': self.apikey,
            'show_id': show_id,
            'q': '%dx%02d' % (season, episode),
            'version': sub_format
        }
        r = self.session.get(self.server_url + 'subtitles/search',
                             params=params,
                             timeout=30)
        r.raise_for_status()
        root = etree.fromstring(r.content)

        if int(root.find('data/count').text) == 0:
            logger.warning('Subtitles not found')
            # If no subtitles are found for the single episode, try to download the whole-season zip
            subs = self._get_season_subtitles(show_id, season, sub_format)
            if subs:
                for subtitle in subs:
                    subtitle.format = video_format
                    subtitle.year = year
                    subtitle.tvdb_id = tvdb_id

                return subs
            else:
                return []

        # Looking for subtitles in first page
        for subtitle in root.findall('data/subtitles/subtitle'):
            if '%dx%02d' % (season,
                            episode) in subtitle.find('name').text.lower():

                logger.debug('Found subtitle id %d - %r - %r',
                             int(subtitle.find('id').text),
                             subtitle.find('name').text,
                             subtitle.find('version').text)

                sub = ItaSASubtitle(int(subtitle.find('id').text),
                                    subtitle.find('show_name').text, season,
                                    episode, video_format, year, tvdb_id,
                                    subtitle.find('name').text)

                subtitles.append(sub)

        # Not in the first page of results, try the next page (if any)
        next_page = root.find('data/next')
        while next_page.text is not None:  # pragma: no cover

            r = self.session.get(next_page.text, timeout=30)
            r.raise_for_status()
            root = etree.fromstring(r.content)

            logger.info('Loading subtitles page %r', root.find('data/page').text)

            # Looking for show in following pages
            for subtitle in root.findall('data/subtitles/subtitle'):
                if '%dx%02d' % (season,
                                episode) in subtitle.find('name').text.lower():

                    logger.debug('Found subtitle id %d - %r - %r',
                                 int(subtitle.find('id').text),
                                 subtitle.find('name').text,
                                 subtitle.find('version').text)

                    sub = ItaSASubtitle(int(subtitle.find('id').text),
                                        subtitle.find('show_name').text,
                                        season, episode, video_format, year,
                                        tvdb_id,
                                        subtitle.find('name').text)

                    subtitles.append(sub)

            next_page = root.find('data/next')

        # Download the subs found, can be more than one in zip
        additional_subs = []
        for sub in subtitles:

            # open the zip
            content = self._download_zip(sub.sub_id)
            if not is_zipfile(io.BytesIO(content)):  # pragma: no cover
                if b'limite di download' in content:
                    raise TooManyRequests()
                else:
                    raise ConfigurationError('Not a zip file: %r' % content)

            with ZipFile(io.BytesIO(content)) as zf:
                if len(zf.namelist()) > 1:  # pragma: no cover

                    for index, name in enumerate(zf.namelist()):

                        if index == 0:
                            # First element
                            sub.content = fix_line_ending(zf.read(name))
                            sub.full_data = name
                        else:
                            add_sub = copy.deepcopy(sub)
                            add_sub.content = fix_line_ending(zf.read(name))
                            add_sub.full_data = name
                            additional_subs.append(add_sub)
                else:
                    sub.content = fix_line_ending(zf.read(zf.namelist()[0]))
                    sub.full_data = zf.namelist()[0]

        return subtitles + additional_subs

    def list_subtitles(self, video, languages):
        return self.query(video.series, video.season, video.episode,
                          video.format, video.resolution)

    def download_subtitle(self, subtitle):  # pragma: no cover
        pass
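For orientation, a hedged usage sketch of the provider above; the credentials and the series/season/episode/format arguments are placeholders, not values from the original project, and a real run needs valid ItalianSubs credentials:

# Hedged usage sketch; username/password and the query arguments are placeholders.
provider = ItaSAProvider(username='user', password='secret')
provider.initialize()
try:
    subtitles = provider.query('Some Show', 1, 1, 'HDTV', '720p')
    for sub in subtitles:
        print(sub.full_data)
finally:
    provider.terminate()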
Example #44
0
                    result = function(url, match, session) or []
                    queue.extend(result)

if __name__ == '__main__':
    import json
    from collections import defaultdict

    config = json.load(open('configuration.json'))

    root = config['root']
    str_rules = config['rules']
    rules = defaultdict(list)
    for key, value in str_rules.items():
        compiled_key = re.compile(key)

        if type(value) != list:
            value = [value]

        for function_name in value:
            function = globals()[function_name]
            rules[compiled_key].append(function)

    session = Session()

    if 'login_url' in config:
        login_data = config['login_data']
        r = session.post(config['login_url'], data=login_data)

    run(root, rules, session)
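For reference, each value in the rules mapping built above is a handler called as function(url, match, session) (see the fragment at the top of this example) and is expected to return further URLs to enqueue. A hedged sketch of such a handler and of a matching configuration.json; collect_links and the config values are illustrative, not taken from the original project:

import re


# Hypothetical rule handler: receives the matched URL, the regex match object
# and the shared Session, and returns new URLs for the crawl queue.
def collect_links(url, match, session):
    response = session.get(url)
    response.raise_for_status()
    # naive href extraction; a real handler would use an HTML parser
    return re.findall(r'href="([^"]+)"', response.text)

# Illustrative configuration.json (keys inferred from the loader above, values are placeholders):
# {
#     "root": "https://example.com/",
#     "rules": {"^https://example\\.com/.*": ["collect_links"]},
#     "login_url": "https://example.com/login",
#     "login_data": {"username": "user", "password": "secret"}
# }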

Example #45
0
 def initialize(self):
     self.session = Session()
     self.session.headers[
         'User-Agent'] = 'Subliminal/%s' % __short_version__
Example #46
0
class AssrtProvider(Provider):
    """Assrt Provider."""
    languages = {Language(*l) for l in supported_languages}
    video_types = (Episode, Movie)

    def __init__(self, token=None):
        if not token:
            raise ConfigurationError('Token must be specified')
        self.token = token

    def initialize(self):
        self.session = Session()
        self.session.headers = {
            'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")
        }

    def terminate(self):
        self.session.close()

    def query(self, languages, video):
        # query the server
        keywords = []
        if isinstance(video, Movie):
            if video.title:
                # title = "".join(e for e in video.title if e.isalnum())
                title = video.title
                keywords.append(title)
            if video.year:
                keywords.append(str(video.year))
        elif isinstance(video, Episode):
            if video.series:
                # series = "".join(e for e in video.series if e.isalnum())
                series = video.series
                keywords.append(series)
            if video.season and video.episode:
                keywords.append('S%02dE%02d' % (video.season, video.episode))
            elif video.episode:
                keywords.append('E%02d' % video.episode)
        query = ' '.join(keywords)

        params = {'token': self.token, 'q': query, 'is_file': 1}
        logger.debug('Searching subtitles: GET /sub/search %r', params)
        res = self.session.get(server_url + '/sub/search',
                               params=params,
                               timeout=10)
        res.raise_for_status()
        result = res.json()

        if result['status'] != 0:
            logger.error('status error: %r', result['status'])
            return []

        # parse the subtitles
        pattern = re.compile(r'lang(?P<code>\w+)')
        subtitles = []
        for sub in result['sub']['subs']:
            if 'lang' not in sub:
                continue
            for key in sub['lang']['langlist'].keys():
                match = pattern.match(key)
                try:
                    language = Language.fromassrt(match.group('code'))
                    output_language = search_language_in_list(
                        language, languages)
                    if output_language:
                        subtitles.append(
                            AssrtSubtitle(output_language, sub['id'],
                                          sub['videoname'], self.session,
                                          self.token))
                except Exception:
                    # skip entries whose language code cannot be parsed or matched
                    pass

        return subtitles

    def list_subtitles(self, video, languages):
        return self.query(languages, video)

    def download_subtitle(self, subtitle):
        r = self.session.get(subtitle.download_link, timeout=10)
        r.raise_for_status()

        subtitle.content = fix_line_ending(r.content)
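A hedged usage sketch of the provider above; the token is a placeholder and some_video is assumed to be a subliminal Movie or Episode object built elsewhere:

# Hedged usage sketch; the token is a placeholder and some_video is assumed
# to be an existing subliminal Movie/Episode object.
provider = AssrtProvider(token='your-assrt-token')
provider.initialize()
try:
    found = provider.list_subtitles(some_video, {Language('zho')})
    if found:
        provider.download_subtitle(found[0])
finally:
    provider.terminate()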
Example #47
0
class ToastSMS:
    """NHN TOAST SMS(MMS) Wrapper
    """
    def __init__(self, app_key, version='v2.2'):
        """Toast SMS initialze with app key and version.

        :param app_key: Toast API App Key
        :param version: Toast API version Default `v2.2`.
        :returns: None.
        """
        base = 'https://api-sms.cloud.toast.com'
        self.domain = '/'.join([base, 'sms', version, 'appKeys', app_key])
        self.basic_validator = Draft7Validator(REQUEST_SCHEMA)
        self.tag_validator = Draft7Validator(TAG_SCHEMA)
        self.upload_validator = Draft7Validator(UPLOAD_SCHEMA)
        self.query_validator = Draft7Validator(QUERY_SCHEMA)
        self.category_validator = Draft7Validator(CATEGORY_SCHEMA)
        self.template_validator = Draft7Validator(TEMPLATE_SCHEMA)
        self.session = Session()

    def call(self, end_point, method='get', params=None, json=None):
        """Call API Method via Requests Session.
        :param end_point: end_point represented like `/sender/mms`.
        :param method: http methods(GET, POST, PUT, DELETE, HEAD etc)
        :param params: url paramters.
        :param json: json object.
        :returns: session Requests object.
        """
        if end_point[0] == '/':
            url = self.domain + end_point
        else:
            url = self.domain + '/' + end_point

        headers = {'Content-Type': 'application/json;charset=UTF-8'}

        return self.session.request(method,
                                    url,
                                    headers=headers,
                                    params=params,
                                    json=json)

    def add_category(self, json):
        """Add Category to Toast Cloud SMS
        :param json: json object.
        :returns: post result.
        """
        self.category_validator.validate(json)
        res = self.call('/categories', 'post', json=json)
        res.raise_for_status()
        return res.json()

    def inquiry_category(self, params=None, category_id=None):
        """Inquiry Category to Toast Cloud SMS
        :param params: url parameter.
        :param category_id: category id.
        :returns: queried result.
        """
        if category_id:
            res = self.call('/categories/{0}'.format(category_id))
        else:
            res = self.call('/categories', params=params)
        res.raise_for_status()
        return res.json()

    def update_category(self, category_id, json):
        """Update Category to Toast Cloud SMS
        :param category_id: id of the category to update.
        :param json: json object.
        :returns: put result.
        """
        self.category_validator.validate(json)
        end_point = '/categories/{0}'.format(category_id)
        res = self.call(end_point, 'put', json=json)
        res.raise_for_status()
        return res.json()

    def delete_category(self, category_id):
        """Delete Category from Toast Cloud SMS
        :param category_id: will delete category_id
        :param json: json object.
        :returns: delete result.
        """
        res = self.call('/categories/{0}'.format(category_id), 'delete')
        res.raise_for_status()
        return res.json()

    def add_template(self, json, category_id=0):
        """Add Template to Toast Cloud SMS.
        :param json: json object.
        :param category_id: Only need for not passed category id via json.
        :returns: post result.
        """
        if 'categoryId' not in json:
            json['categoryId'] = category_id

        self.template_validator.validate(json)
        res = self.call('/templates', 'post', json=json)
        res.raise_for_status()
        return res.json()

    def inquiry_template(self, params=None, template_id=None):
        """Inquiry template to Toast Cloud SMS
        :param params: url parameter(categoryId, useYn, pageSize, pageNum, totalCount)
        :returns: queried result.
        """
        if template_id:
            res = self.call('/templates/{0}'.format(template_id))
        else:
            res = self.call('/templates', params=params)
        res.raise_for_status()
        return res.json()

    def update_template(self, template_id, json):
        """Update Template to Toast Cloud SMS
        :param template_id: id of the template to update.
        :param json: json object.
        :returns: put result.
        """
        self.template_validator.validate(json)
        end_point = '/templates/{0}'.format(template_id)
        res = self.call(end_point, 'put', json=json)
        res.raise_for_status()
        return res.json()

    def delete_template(self, template_id):
        """Delete Category from Toast Cloud SMS
        :param template_id: will delete template_id
        :param json: json object.
        :returns: delete result.
        """
        res = self.call('/templates/{0}'.format(template_id), 'delete')
        res.raise_for_status()
        return res.json()

    def send_message(self, json):
        """Send Message via API(MMS, SMS)
        :param json: json object.
        :returns: post result.
        """
        self.basic_validator.validate(json)
        send_type = json.pop('sendType')
        end_point = '/sender/{0}'.format(send_type)
        res = self.call(end_point, 'post', json=json)
        res.raise_for_status()
        return res.json()

    def inquiry_sent_result(self, params):
        """Inquiry sent Message Result from API(MMS, SMS)
        :param json: json object.
        :returns: json object about queried set.
        """
        self.query_validator.validate(params)
        send_type = params.pop('sendType')
        end_point = '/sender/{0}'.format(send_type)
        res = self.call(end_point, params=params)
        res.raise_for_status()
        return res.json()

    def send_tag_message(self, json):
        """Send Tag Message to API(MMS, SMS)
        :param json: json object.
        :returns: post result.
        """
        self.tag_validator.validate(json)
        send_type = json.pop('sendType')
        end_point = '/tag-sender/{0}'.format(send_type)
        res = self.call(end_point, 'post', json=json)
        res.raise_for_status()
        return res.json()
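A hedged usage sketch of the wrapper above; the app key, phone numbers and body fields are placeholders and have to satisfy REQUEST_SCHEMA and the Toast SMS API, neither of which is shown in this excerpt:

# Hedged usage sketch; app key, numbers and body fields are placeholders and
# must match REQUEST_SCHEMA / the Toast SMS API (not shown here).
sms = ToastSMS(app_key='my-app-key')
result = sms.send_message({
    'sendType': 'sms',  # popped by send_message to pick the /sender/sms endpoint
    'body': 'Hello from the ToastSMS wrapper',
    'sendNo': '01000000000',
    'recipientList': [{'recipientNo': '01000000000'}],
})
print(result)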
Example #48
0
 def initialize(self):
     self.session = Session()
     self.session.headers = {
         'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")
     }
Example #49
0
import requests


def get_file_list(url: str, session: requests.Session):
    ret = session.get(url)
    ret.raise_for_status()
    parser = CustomHTMLParser()
    parser.feed(ret.text)
    return parser.file_list[1:]
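CustomHTMLParser is not part of the excerpt above. A minimal sketch of what such a parser might look like, assuming it simply collects the href of every anchor into file_list (and that the [1:] slice above skips a parent-directory link):

from html.parser import HTMLParser


class CustomHTMLParser(HTMLParser):
    """Hypothetical parser: collects every <a href=...> target into file_list."""

    def __init__(self):
        super().__init__()
        self.file_list = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for name, value in attrs:
                if name == 'href' and value:
                    self.file_list.append(value)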
Example #50
0
class Bybit():
    url_main = 'https://api.bybit.com'
    url_test = 'https://api-testnet.bybit.com'
    ws_url_main = 'wss://stream.bybit.com/realtime'
    ws_url_test = 'wss://stream-testnet.bybit.com/realtime'
    headers = {'Content-Type': 'application/json'}

    def __init__(self, api_key, secret, symbol, ws=True, test=False):
        self.api_key = api_key
        self.secret = secret

        self.symbol = symbol
        self.s = Session()
        self.s.headers.update(self.headers)

        self.url = self.url_main if not test else self.url_test
        self.ws_url = self.ws_url_main if not test else self.ws_url_test

        self.ws = ws
        if ws:
            self._connect()

    #
    # WebSocket
    #

    def _connect(self):
        self.ws = WebSocketApp(url=self.ws_url,
                               on_open=self._on_open,
                               on_message=self._on_message)

        self.ws_data = {
            'trade.' + str(self.symbol): deque(maxlen=200),
            'instrument.' + str(self.symbol): {},
            'order_book_25L1.' + str(self.symbol): pd.DataFrame(),
            'position': {},
            'execution': deque(maxlen=200),
            'order': deque(maxlen=200)
        }

        positions = self.get_position_http()['result']
        print(
            'Positions ----------------------------------------------------------'
        )
        print(positions)
        if positions is None:
            print('returned list is None')
        else:
            print('returned list is not None')
            for p in positions:
                if p['symbol'] == self.symbol:
                    self.ws_data['position'].update(p)
                    break

        Thread(target=self.ws.run_forever, daemon=True).start()

    def _on_open(self):
        timestamp = int(time.time() * 1000)
        param_str = 'GET/realtime' + str(timestamp)
        sign = hmac.new(self.secret.encode('utf-8'), param_str.encode('utf-8'),
                        hashlib.sha256).hexdigest()

        self.ws.send(
            json.dumps({
                'op': 'auth',
                'args': [self.api_key, timestamp, sign]
            }))
        self.ws.send(
            json.dumps({
                'op':
                'subscribe',
                'args': [
                    'trade.' + str(self.symbol),
                    'instrument.' + str(self.symbol),
                    'order_book_25L1.' + str(self.symbol), 'position',
                    'execution', 'order'
                ]
            }))

    def _on_message(self, message):
        message = json.loads(message)
        topic = message.get('topic')
        if topic == 'order_book_25L1.' + str(self.symbol):
            if message['type'] == 'snapshot':
                self.ws_data[topic] = pd.io.json.json_normalize(
                    message['data']).set_index('id').sort_index(
                        ascending=False)
            else:  # message['type'] == 'delta'
                # delete or update or insert
                if len(message['data']['delete']) != 0:
                    drop_list = [x['id'] for x in message['data']['delete']]
                    self.ws_data[topic] = self.ws_data[topic].drop(index=drop_list)
                elif len(message['data']['update']) != 0:
                    update_list = pd.io.json.json_normalize(
                        message['data']['update']).set_index('id')
                    self.ws_data[topic].update(update_list)
                    self.ws_data[topic] = self.ws_data[topic].sort_index(
                        ascending=False)
                elif len(message['data']['insert']) != 0:
                    insert_list = pd.io.json.json_normalize(
                        message['data']['insert']).set_index('id')
                    self.ws_data[topic].update(insert_list)
                    self.ws_data[topic] = self.ws_data[topic].sort_index(
                        ascending=False)

        elif topic in ['trade.' + str(self.symbol), 'execution', 'order']:
            self.ws_data[topic].append(message['data'][0])

        elif topic in ['instrument.' + str(self.symbol), 'position']:
            self.ws_data[topic].update(message['data'][0])

    def get_trade(self):
        if not self.ws: return None

        return self.ws_data['trade.' + str(self.symbol)]

    def get_instrument(self):
        if not self.ws: return None
        while len(self.ws_data['instrument.' + str(self.symbol)]) != 4:
            time.sleep(1.0)

        return self.ws_data['instrument.' + str(self.symbol)]

    def get_orderbook(self, side=None):
        if not self.ws: return None
        while self.ws_data['order_book_25L1.' + str(self.symbol)].empty:
            time.sleep(1.0)

        if side == 'Sell':
            orderbook = self.ws_data['order_book_25L1.' +
                                     str(self.symbol)].query(
                                         'side.str.contains("Sell")',
                                         engine='python')
        elif side == 'Buy':
            orderbook = self.ws_data['order_book_25L1.' +
                                     str(self.symbol)].query(
                                         'side.str.contains("Buy")',
                                         engine='python')
        else:
            orderbook = self.ws_data['order_book_25L1.' + str(self.symbol)]
        return orderbook

    def get_position(self):
        if not self.ws: return None

        return self.ws_data['position']

    def get_my_executions(self):

        if not self.ws: return None

        return self.ws_data['execution']

    def get_order(self):

        if not self.ws: return None

        return self.ws_data['order']

    #
    # Http Apis
    #

    def _request(self, method, path, payload):
        payload['api_key'] = self.api_key
        payload['timestamp'] = int(time.time() * 1000)
        payload = dict(sorted(payload.items()))
        for k, v in list(payload.items()):
            if v is None:
                del payload[k]

        param_str = urllib.parse.urlencode(payload)
        sign = hmac.new(self.secret.encode('utf-8'), param_str.encode('utf-8'),
                        hashlib.sha256).hexdigest()
        payload['sign'] = sign

        if method == 'GET':
            query = payload
            body = None
        else:
            query = None
            body = json.dumps(payload)

        req = Request(method, self.url + path, data=body, params=query)
        prepped = self.s.prepare_request(req)

        resp = None
        try:
            resp = self.s.send(prepped)
            resp.raise_for_status()
        except HTTPError as e:
            print(e)

        try:
            return resp.json()
        except json.decoder.JSONDecodeError as e:
            print('json.decoder.JSONDecodeError: ' + str(e))
            return resp.text

    def place_active_order(self,
                           side=None,
                           symbol=None,
                           order_type=None,
                           qty=None,
                           price=None,
                           time_in_force='GoodTillCancel',
                           take_profit=None,
                           stop_loss=None,
                           reduce_only=None,
                           order_link_id=None):

        payload = {
            'side': side,
            'symbol': symbol if symbol else self.symbol,
            'order_type': order_type,
            'qty': qty,
            'price': price,
            'time_in_force': time_in_force,
            'take_profit': take_profit,
            'stop_loss': stop_loss,
            'order_link_id': order_link_id
        }
        #/open-api/order/create
        return self._request('POST',
                             '/v2/private/order/create',
                             payload=payload)

    def place_active_order_ts(self,
                              symbol=None,
                              take_profit=None,
                              stop_loss=None,
                              trailing_stop=None,
                              new_trailing_active=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
            'take_profit': take_profit,
            'stop_loss': stop_loss,
            'trailing_stop': trailing_stop,
            'new_trailing_active': new_trailing_active
        }
        #/open-api/position/trading-stop
        return self._request('POST',
                             '/open-api/position/trading-stop',
                             payload=payload)

    def get_active_order(self,
                         order_id=None,
                         order_link_id=None,
                         symbol=None,
                         sort=None,
                         order=None,
                         page=None,
                         limit=None,
                         order_status=None):

        payload = {
            'order_id': order_id,
            'order_link_id': order_link_id,
            'symbol': symbol if symbol else self.symbol,
            'sort': sort,
            'order': order,
            'page': page,
            'limit': limit,
            'order_status': order_status
        }
        return self._request('GET', '/open-api/order/list', payload=payload)

    def cancel_active_order(self, order_id=None):

        payload = {'order_id': order_id}
        return self._request('POST', '/open-api/order/cancel', payload=payload)

    def place_conditional_order(self,
                                side=None,
                                symbol=None,
                                order_type=None,
                                qty=None,
                                price=None,
                                base_price=None,
                                stop_px=None,
                                time_in_force='GoodTillCancel',
                                close_on_trigger=None,
                                reduce_only=None,
                                order_link_id=None):

        payload = {
            'side': side,
            'symbol': symbol if symbol else self.symbol,
            'order_type': order_type,
            'qty': qty,
            'price': price,
            'base_price': base_price,
            'stop_px': stop_px,
            'time_in_force': time_in_force,
            'close_on_trigger': close_on_trigger,
            'reduce_only': reduce_only,
            'order_link_id': order_link_id
        }
        return self._request('POST',
                             '/open-api/stop-order/create',
                             payload=payload)

    def get_conditional_order(self,
                              stop_order_id=None,
                              order_link_id=None,
                              symbol=None,
                              sort=None,
                              order=None,
                              page=None,
                              limit=None):

        payload = {
            'stop_order_id': stop_order_id,
            'order_link_id': order_link_id,
            'symbol': symbol if symbol else self.symbol,
            'sort': sort,
            'order': order,
            'page': page,
            'limit': limit
        }
        return self._request('GET',
                             '/open-api/stop-order/list',
                             payload=payload)

    def cancel_conditional_order(self, order_id=None):

        payload = {'order_id': order_id}
        return self._request('POST',
                             '/open-api/stop-order/cancel',
                             payload=payload)

    def get_leverage(self):

        payload = {}
        return self._request('GET', '/user/leverage', payload=payload)

    def change_leverage(self, symbol=None, leverage=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
            'leverage': leverage
        }
        return self._request('POST', '/user/leverage/save', payload=payload)

    def get_position_http(self):

        payload = {}
        return self._request('GET', '/position/list', payload=payload)

    def change_position_margin(self, symbol=None, margin=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
            'margin': margin
        }
        return self._request('POST',
                             '/position/change-position-margin',
                             payload=payload)

    def get_prev_funding_rate(self, symbol=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
        }
        return self._request('GET',
                             '/open-api/funding/prev-funding-rate',
                             payload=payload)

    def get_prev_funding(self, symbol=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
        }
        return self._request('GET',
                             '/open-api/funding/prev-funding',
                             payload=payload)

    def get_predicted_funding(self, symbol=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
        }
        return self._request('GET',
                             '/open-api/funding/predicted-funding',
                             payload=payload)

    def get_my_execution(self, order_id=None):

        payload = {'order_id': order_id}
        return self._request('GET',
                             '/v2/private/execution/list',
                             payload=payload)

    #
    # New Http Apis (developing)
    #

    def symbols(self):

        payload = {}
        return self._request('GET', '/v2/public/symbols', payload=payload)

    def kline(self, symbol=None, interval=None, _from=None, limit=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
            'interval': interval,
            'from': _from,
            'limit': limit
        }
        return self._request('GET', '/v2/public/kline/list', payload=payload)

    def place_active_order_v2(self,
                              symbol=None,
                              side=None,
                              order_type=None,
                              qty=None,
                              price=None,
                              time_in_force='GoodTillCancel',
                              order_link_id=None):

        payload = {
            'symbol': symbol if symbol else self.symbol,
            'side': side,
            'order_type': order_type,
            'qty': qty,
            'price': price,
            'time_in_force': time_in_force,
            'order_link_id': order_link_id
        }
        return self._request('POST',
                             '/v2/private/order/create',
                             payload=payload)

    def cancel_active_order_v2(self, order_id=None):

        payload = {'order_id': order_id}
        return self._request('POST',
                             '/v2/private/order/cancel',
                             payload=payload)

    def cancel_all_active_orders(self, symbol=None):

        payload = {'symbol': symbol if symbol else self.symbol}
        return self._request('POST',
                             '/v2/private/order/cancelAll',
                             payload=payload)

    def cancel_all_conditional_orders(self, symbol=None):

        payload = {'symbol': symbol if symbol else self.symbol}
        return self._request('POST',
                             '/v2/private/stop-order/cancelAll',
                             payload=payload)

    def get_active_order_real_time(self, symbol=None):

        payload = {'symbol': symbol if symbol else self.symbol}
        return self._request('GET', '/v2/private/order', payload=payload)

    def get_wallet_balance(self, coin=None):

        payload = {'coin': coin if coin else self.symbol}
        return self._request('GET',
                             '/v2/private/wallet/balance',
                             payload=payload)

    def get_tickers(self, symbol=None):

        payload = {'symbol': symbol if symbol else self.symbol}
        return self._request('GET', '/v2/public/tickers', payload=payload)

    def get_api_data(self):

        payload = {}
        return self._request('GET', '/open-api/api-key', payload=payload)

    def replace_active_order(self,
                             order_id=None,
                             symbol=None,
                             p_r_qty=None,
                             p_r_price=None):

        payload = {
            'order_id': order_id,
            'symbol': symbol if symbol else self.symbol,
            'p_r_qty': p_r_qty,
            'p_r_price': p_r_price
        }
        return self._request('POST',
                             '/open-api/order/replace',
                             payload=payload)
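A hedged usage sketch of the wrapper above; the key and secret are placeholders, ws=False skips the WebSocket connection, and test=True targets the testnet endpoints:

# Hedged usage sketch; key/secret are placeholders.
bybit = Bybit(api_key='YOUR_KEY', secret='YOUR_SECRET',
              symbol='BTCUSD', ws=False, test=True)
print(bybit.get_tickers())
print(bybit.kline(interval='1', _from=1577836800, limit=5))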
Example #51
0
 def test_login_user(self):
     self.register_user(self.USER_1_PAYLOAD)
     session = Session()
     response = self.login_user(session, self.USER_1_PAYLOAD)
     self.assertEqual(response.status_code, 201)
     self.assertTrue('token' in session.cookies)
Example #52
0
import sys

from requests import Session

from config import (ACCESS_TOKEN, BASE_URL, PASSWORD, URLS_EXPORT_PATH,
                    USER_ID, USERNAME)

LINK_TYPES = {'ssh': 'ssh_url_to_repo', 'http': 'http_url_to_repo'}
LINK_TYPE = LINK_TYPES.get(sys.argv[-1].lower(), None)

if not LINK_TYPE:
    print('Specify link type for generating URLs:'
          '\n    python export_repos_urls.py ssh'
          '\n    python export_repos_urls.py http')
    exit(1)

session = Session()
session.headers = {'Authorization': f'Bearer {ACCESS_TOKEN}'}


def generate_url(endpoint: str) -> str:
    """Generate Endpoint URL."""
    return f'{BASE_URL}{endpoint}'


def get_user_projects(user_id: str) -> list:
    """Get user projects."""
    projects = []
    URL = generate_url('projects')
    id_after = None

    def get_projects_list(id_after=None):
Example #53
0
class Client(object):
    u"""Starcoin sdk client
    """
    def __init__(
        self,
        url,
    ):
        self.request = RpcRequest(url)
        self.session = Session()

    def execute(self, operation):
        u""" Execute a rpc request operation
        operation = {
            "rpc_method": $rpc_method,
            "params": $params,
        }
        such as:
        operation = {
            "rpc_method": "node.info",
            "params": None,
        }

        """
        req = self.request.prepare(rpc_method=operation[u"rpc_method"],
                                   params=operation[u"params"])
        resp = self.session.send(req)
        resp.raise_for_status()
        try:
            json = resp.json()
        except ValueError as e:
            raise_with_traceback(
                InvalidServerResponse(
                    "Parse response as json failed: {}, response: {}".format(
                        e, resp.text)))
        if json.get(u"error") is not None:
            raise_with_traceback(
                JsonResponseError("Response:{}".format(resp.text)))
        return json.get(u"result")

    def node_info(self, ):
        u"""Starcoin node information

        Return the node information
        """
        operation = {
            u"rpc_method": u"node.info",
            u"params": None,
        }
        return self.execute(operation)

    def node_status(self, ):
        u""" Starcoin node status

        """
        operation = {
            u"rpc_method": u"node.status",
            u"params": None,
        }
        ret = self.execute(operation)
        return ret

    def get_transaction(self, txn_hash):
        operation = {
            u"rpc_method": u"chain.get_transaction",
            u"params": [txn_hash],
        }
        ret = self.execute(operation)
        return ret

    def get_transaction_info(self, txn_hash):
        operation = {
            u"rpc_method": u"chain.get_transaction_info",
            u"params": [txn_hash],
        }
        ret = self.execute(operation)
        return ret

    def get_block_by_number(self, number):
        operation = {
            u"rpc_method": u"chain.get_block_by_number",
            u"params": [number],
        }
        ret = self.execute(operation)
        return ret

    def submit(self, txn):
        operation = {
            u"rpc_method": u"txpool.submit_hex_transaction",
            u"params": [txn]
        }
        return self.execute(operation)

    def state_get(self, access_path):
        operation = {u"rpc_method": u"state.get", u"params": [access_path]}
        ret = self.execute(operation)
        if ret is None:
            raise_with_traceback(StateNotFoundError(u"State not found"))
        return ret

    def is_account_exist(self, addr):
        try:
            self.get_account_resource(addr)
        except StateNotFoundError:
            return False
        return True

    def get_account_sequence(self, addr):
        try:
            account_resource = self.get_account_resource(addr)
        except StateNotFoundError:
            return 0
        return int(account_resource.sequence_number)

    def get_account_token(self, addr, module, name):
        type_parm = u"{}::{}::{}".format(CORE_CODE_ADDRESS, module, name)

        struct_tag = u"{}::{}::{}<{}>".format(CORE_CODE_ADDRESS, u"Account",
                                              u"Balance", type_parm)
        path = u"{}/{}/{}".format(addr, RESOURCE_TAG, struct_tag)
        state = self.state_get(path)
        return state

    def get_account_resource(self, addr):
        struct_tag = u"{}::{}::{}".format(CORE_CODE_ADDRESS, u"Account",
                                          u"Account")
        path = u"{}/{}/{}".format(addr, RESOURCE_TAG, struct_tag)
        state = self.state_get(path)
        return state

    def sign_txn(self, raw_txn):
        operation = {
            u"rpc_method": u"account.sign_txn_request",
            u"params": [raw_txn],
        }
        ret = self.execute(operation)
        return ret

    def get_block_reward(self, block_number):
        u""" get block reward by blcok_number,block_number shoule less than header.block_number
        return coin_reward, author, gas_fee
        """
        operation = {
            u"rpc_method": u"chain.get_block_by_number",
            u"params": [block_number + 1],
        }
        state_root = self.execute(operation).get("header").get("state_root")
        operation = {
            u"rpc_method": u"state.get_account_state_set",
            u"params": ["0x1", state_root],
        }
        state_set = self.execute(operation)
        infos = state_set.get("resources").get(
            "0x00000000000000000000000000000001::BlockReward::RewardQueue"
        ).get("value")[1][1].get("Vector")

        for info in infos:
            info = info.get("Struct").get("value")
            if int(info[0][1].get("U64")) != block_number:
                continue
            reward = int(info[1][1].get("U128"))
            author = info[2][1].get("Address")
            gas_fee = int(
                info[3][1].get("Struct").get("value")[0][1].get("U128"))
        return (reward, author, gas_fee)
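A hedged usage sketch of the client above; the RPC endpoint is a placeholder:

# Hedged usage sketch; the endpoint URL is a placeholder.
client = Client(url='http://127.0.0.1:9850')
print(client.node_info())
print(client.get_block_by_number(1))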
Example #54
0
 def test_login_user_when_he_already_loged(self):
     self.register_user(self.USER_1_PAYLOAD)
     session = Session()
     self.login_user(session, self.USER_1_PAYLOAD)
     response = self.login_user(session, self.USER_1_PAYLOAD)
     self.assertEqual(response.status_code, 201)
Example #55
0
    books[progress]['id'] = bid
    books[progress]['link'] = bpath

    if progress % progress_counter == 0:
        print(
            f"Template making progress: {progress}, book ID {bid}, book path {bpath}")
    icons = []
    if book["audience"] == "C":
        icons.append("C")
    if book["reviewed"]:
        icons.append("R")
        lastReviewed = bid
    last = bid
    ipath = osp.join(osp.dirname(bpath), "index.html")
    sess = Session()

    title_image_path, title_image = imgurl(book['pages'][0]['url'], bid, bpath, sess)
    if not title_image:
        slugs_not_found.add(book['slug'])
        continue

    books[progress]['image'] = title_image_path

    ndx.append(
        dict(
            title=book["title"],
            author=book["author"],
            pages=len(book["pages"]),
            image=title_image,
            icons=" ".join(icons),
Example #56
0
 def __init__(
     self,
     url,
 ):
     self.request = RpcRequest(url)
     self.session = Session()
Example #57
0
async def session() -> str:
    async with Session() as session:
        yield session
Example #58
0
import json
import zxcvbn
import re
from requests import Request, Session

req = Request('LIST',
              url="http://172.17.0.2:1234/v1/secret/metadata/",
              headers={"X-Vault-Token": "myroot"})
prep = req.prepare()
resp = Session().send(prep).text
list_keys = json.loads(resp).get("data").get("keys")

print(list_keys)

for keys in list_keys:
    req_metadata = Request('GET',
                           url="http://172.17.0.2:1234/v1/secret/metadata/%s" %
                           keys,
                           headers={"X-Vault-Token": "myroot"})
    prep_metadata = req_metadata.prepare()
    resp_metadata = Session().send(prep_metadata).text
    keys_metadata = json.loads(resp_metadata).get("data")
    print(keys_metadata['created_time'])
    #    json.dumps
    print(keys_metadata)

for keys in list_keys:
    req = Request('GET',
                  url="http://172.17.0.2:1234/v1/secret/data/%s" % keys,
                  headers={"X-Vault-Token": "myroot"})
    prep = req.prepare()
Example #59
0
class Connector:
    """
    This is the main class of the data_connector component.
    Initialize the Connector class as shown in the example code below.

    Parameters
    ----------
    config_path
        The path to the config. It can be hosted, e.g. "yelp", or from
        local filesystem, e.g. "./yelp"
    auth_params
        The parameter for authentication, e.g. OAuth2
    kwargs
        Additional parameters

    Example
    -------
    >>> from dataprep.data_connector import Connector
    >>> dc = Connector("yelp", auth_params={"access_token":access_token})
    """

    _impdb: ImplicitDatabase
    _vars: Dict[str, Any]
    _auth_params: Dict[str, Any]
    _session: Session
    _jenv: Environment

    def __init__(
        self,
        config_path: str,
        auth_params: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> None:
        self._session = Session()
        if (
            config_path.startswith(".")
            or config_path.startswith("/")
            or config_path.startswith("~")
        ):
            path = Path(config_path).resolve()
        else:
            # From Github!
            ensure_config(config_path)
            path = config_directory() / config_path

        self._impdb = ImplicitDatabase(path)

        self._vars = kwargs
        self._auth_params = auth_params or {}
        self._jenv = Environment(undefined=StrictUndefined)

    def _fetch(
        self,
        table: ImplicitTable,
        auth_params: Optional[Dict[str, Any]],
        kwargs: Dict[str, Any],
    ) -> Response:
        method = table.method
        url = table.url
        req_data: Dict[str, Dict[str, Any]] = {
            "headers": {},
            "params": {},
            "cookies": {},
        }

        merged_vars = {**self._vars, **kwargs}
        if table.authorization is not None:
            table.authorization.build(req_data, auth_params or self._auth_params)

        for key in ["headers", "params", "cookies"]:
            if getattr(table, key) is not None:
                instantiated_fields = getattr(table, key).populate(
                    self._jenv, merged_vars
                )
                req_data[key].update(**instantiated_fields)
        if table.body is not None:
            # TODO: do we support binary body?
            instantiated_fields = table.body.populate(self._jenv, merged_vars)
            if table.body_ctype == "application/x-www-form-urlencoded":
                req_data["data"] = instantiated_fields
            elif table.body_ctype == "application/json":
                req_data["json"] = instantiated_fields
            else:
                raise UnreachableError

        resp: Response = self._session.send(  # type: ignore
            Request(
                method=method,
                url=url,
                headers=req_data["headers"],
                params=req_data["params"],
                json=req_data.get("json"),
                data=req_data.get("data"),
                cookies=req_data["cookies"],
            ).prepare()
        )

        if resp.status_code != 200:
            raise RequestError(status_code=resp.status_code, message=resp.text)

        return resp

    @property
    def table_names(self) -> List[str]:
        """
        Return all the names of the available tables in a list.

        Note
        ----
        We abstract each website as a database containing several tables.
        For example in Spotify, we have artist and album table.
        """
        return list(self._impdb.tables.keys())

    def info(self) -> None:
        """
        Show the basic information and provide guidance for users to issue queries.
        """

        # get info
        tbs: Dict[str, Any] = {}
        for cur_table in self._impdb.tables:
            table_config_content = self._impdb.tables[cur_table].config
            params_required = []
            params_optional = []
            example_query_fields = []
            count = 1
            for k, val in table_config_content["request"]["params"].items():
                if isinstance(val, bool) and val:
                    params_required.append(k)
                    example_query_fields.append(f"""{k}="word{count}\"""")
                    count += 1
                elif isinstance(val, bool):
                    params_optional.append(k)
            tbs[cur_table] = {}
            tbs[cur_table]["required_params"] = params_required
            tbs[cur_table]["optional_params"] = params_optional
            tbs[cur_table]["joined_query_fields"] = example_query_fields

        # show table info
        print(
            INFO_TEMPLATE.render(
                ntables=len(self.table_names), dbname=self._impdb.name, tbs=tbs
            )
        )

    def show_schema(self, table_name: str) -> pd.DataFrame:
        """
        This method shows the schema of the table that will be returned,
        so that the user knows what information to expect.

        Parameters
        ----------
        table_name
            The table name.

        Returns
        -------
        pd.DataFrame
            The returned data's schema.

        Note
        ----
        The schema is defined in the configuration file.
        The user can either use the default one or change it by editing the configuration file.
        """
        print(f"table: {table_name}")
        table_config_content = self._impdb.tables[table_name].config
        schema = table_config_content["response"]["schema"]
        new_schema_dict: Dict[str, List[Any]] = {}
        new_schema_dict["column_name"] = []
        new_schema_dict["data_type"] = []
        for k in schema.keys():
            new_schema_dict["column_name"].append(k)
            new_schema_dict["data_type"].append(schema[k]["type"])
        return pd.DataFrame.from_dict(new_schema_dict)

    def query(
        self, table: str, auth_params: Optional[Dict[str, Any]] = None, **where: Any,
    ) -> pd.DataFrame:
        """
        Use this method to query the API and get the returned table.

        Example
        -------
        >>> df = dc.query('businesses', term="korean", location="vancouver")

        Parameters
        ----------
        table
            The table name.
        auth_params
            The parameters for authentication. Usually the authentication parameters
            should be defined when instantiating the Connector. In case some tables have different
            authentication options, a different authentication parameter can be defined here.
            This parameter will override the one from Connector if passed.
        where
            The additional parameters required for the query.

        Returns
        -------
            pd.DataFrame
                A DataFrame that contains the data returned by the website API.
        """
        assert (
            table in self._impdb.tables
        ), f"No such table {table} in {self._impdb.name}"

        itable = self._impdb.tables[table]

        resp = self._fetch(itable, auth_params, where)

        return itable.from_response(resp)
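A hedged usage sketch tying the methods above together; the access token is a placeholder and "yelp" stands for any available config, mirroring the class docstring:

# Hedged usage sketch mirroring the class docstring; the token is a placeholder.
dc = Connector("yelp", auth_params={"access_token": "YOUR_TOKEN"})
dc.info()
print(dc.show_schema("businesses"))
df = dc.query("businesses", term="korean", location="vancouver")
print(df.head())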
Example #60
0
class PodnapisiProvider(Provider):
    """Podnapisi Provider."""
    languages = ({Language('por', 'BR'),
                  Language('srp', script='Latn')} | {
                      Language.fromalpha2(l)
                      for l in language_converters['alpha2'].codes
                  })
    server_url = 'http://podnapisi.net/subtitles/'
    subtitle_class = PodnapisiSubtitle

    def initialize(self):
        self.session = Session()
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__

    def terminate(self):
        self.session.close()

    def query(self, language, keyword, season=None, episode=None, year=None):
        # set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652
        params = {'sXML': 1, 'sL': str(language), 'sK': keyword}
        is_episode = False
        if season and episode:
            is_episode = True
            params['sTS'] = season
            params['sTE'] = episode
        if year:
            params['sY'] = year

        # loop over paginated results
        logger.info('Searching subtitles %r', params)
        subtitles = []
        pids = set()
        while True:
            # query the server
            xml = etree.fromstring(
                self.session.get(self.server_url + 'search/old',
                                 params=params,
                                 timeout=10).content)

            # exit if no results
            if not int(xml.find('pagination/results').text):
                logger.debug('No subtitles found')
                break

            # loop over subtitles
            for subtitle_xml in xml.findall('subtitle'):
                # read xml elements
                language = Language.fromietf(
                    subtitle_xml.find('language').text)
                hearing_impaired = 'n' in (subtitle_xml.find('flags').text
                                           or '')
                page_link = subtitle_xml.find('url').text
                pid = subtitle_xml.find('pid').text
                releases = []
                if subtitle_xml.find('release').text:
                    for release in subtitle_xml.find('release').text.split():
                        release = re.sub(r'\.+$', '',
                                         release)  # remove trailing dots
                        release = ''.join(
                            filter(lambda x: ord(x) < 128,
                                   release))  # remove non-ascii characters
                        releases.append(release)
                title = subtitle_xml.find('title').text
                season = int(subtitle_xml.find('tvSeason').text)
                episode = int(subtitle_xml.find('tvEpisode').text)
                year = int(subtitle_xml.find('year').text)

                if is_episode:
                    subtitle = self.subtitle_class(language,
                                                   hearing_impaired,
                                                   page_link,
                                                   pid,
                                                   releases,
                                                   title,
                                                   season=season,
                                                   episode=episode,
                                                   year=year)
                else:
                    subtitle = self.subtitle_class(language,
                                                   hearing_impaired,
                                                   page_link,
                                                   pid,
                                                   releases,
                                                   title,
                                                   year=year)

                # ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321
                if pid in pids:
                    continue

                logger.debug('Found subtitle %r', subtitle)
                subtitles.append(subtitle)
                pids.add(pid)

            # stop on last page
            if int(xml.find('pagination/current').text) >= int(
                    xml.find('pagination/count').text):
                break

            # increment current page
            params['page'] = int(xml.find('pagination/current').text) + 1
            logger.debug('Getting page %d', params['page'])

        return subtitles

    def list_subtitles(self, video, languages):
        if isinstance(video, Episode):
            return [
                s for l in languages for s in self.query(l,
                                                         video.series,
                                                         season=video.season,
                                                         episode=video.episode,
                                                         year=video.year)
            ]
        elif isinstance(video, Movie):
            return [
                s for l in languages
                for s in self.query(l, video.title, year=video.year)
            ]

    def download_subtitle(self, subtitle):
        # download as a zip
        logger.info('Downloading subtitle %r', subtitle)
        r = self.session.get(self.server_url + subtitle.pid + '/download',
                             params={'container': 'zip'},
                             timeout=10)
        r.raise_for_status()

        # open the zip
        with ZipFile(io.BytesIO(r.content)) as zf:
            if len(zf.namelist()) > 1:
                raise ProviderError('More than one file to unzip')

            subtitle.content = fix_line_ending(zf.read(zf.namelist()[0]))
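
A short driver sketch for the provider above, calling it directly instead of
going through subliminal's provider pool. The video filename is a placeholder,
and Video.fromname and babelfish's Language are assumed to be importable as in
subliminal.

# Hedged sketch: exercise PodnapisiProvider directly; the filename is a placeholder.
from babelfish import Language
from subliminal import Video

video = Video.fromname('Show.Name.S01E01.720p.HDTV.x264-GROUP.mkv')
provider = PodnapisiProvider()
provider.initialize()
try:
    # list_subtitles() dispatches to query() with the series or movie title.
    subtitles = provider.list_subtitles(video, {Language('eng')})
    if subtitles:
        # download_subtitle() fills subtitle.content with the unzipped file.
        provider.download_subtitle(subtitles[0])
finally:
    provider.terminate()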