Ejemplo n.º 1
0
  def assertRequestBodyIncludes(self, params):
    """Assert every key=value pair of *params* appears URL-encoded in the
    body of the first request recorded by the ``responses`` mock."""
    recorded_body = responses.calls[0].request.body

    for key, value in params.items():
      encoded_pair = quote_plus(str(key)) + '=' + quote_plus(str(value))
      self.assertIn(encoded_pair, recorded_body)
Ejemplo n.º 2
0
 def createProjectOutput(self, project, hostBene):
     """Serialize *project* into a Turtle-style RDF fragment rooted at
     cordis:projects/<identifier>.

     Optional attributes (homepage, start/end dates, status, funding scheme,
     participants, host/beneficiary links, subjects) are emitted only when
     present (the `len(...) > 1` guards skip empty or single-char values).

     Args:
         project: object carrying the CORDIS project attributes read below.
         hostBene: iterable of (role, project_id, org_name) triples; role is
             'hostInstitution' or 'beneficiary'.

     Returns:
         str: the RDF fragment, terminated by " .\\n\\n".
     """
     # Mandatory header triples. setLiterals presumably escapes characters
     # that would break a quoted literal -- confirm against its definition.
     output = ('cordis:projects/' + project.identifier + ' a dbo:ResearchProject;\n\t' +
               'dpro:projectReferenceID\t"' + self.setLiterals(project.referenceID) + '";\n\t' +
               'doap:name\t"' + self.setLiterals(project.name) + '";\n\t' +
               'rdfs:label\t"' + self.setLiterals(project.name) + '";\n\t' +
               'dc:title\t"' + self.setLiterals(project.title) + '";\n\t')
     if len(project.homepage) > 1:
         output = output + 'doap:homepage\t<' + project.homepage + '>;\n\t'
     if len(project.startDate) > 1:
         # Re-orders date fields into xsd:date (YYYY-MM-DD); assumes the
         # source format is MM/DD/YYYY -- TODO confirm with input data.
         output = output + ( 'dbo:projectStartDate\t"' + project.startDate.split('/')[2] + '-' + project.startDate.split('/')[0] + '-' + project.startDate.split('/')[1] + '"^^xsd:date;\n\t' +
                             'dbo:projectEndDate\t"' + project.endDate.split('/')[2] + '-' + project.endDate.split('/')[0] + '-' + project.endDate.split('/')[1] + '"^^xsd:date;\n\t')
     if len(project.status) > 1:
         output = output + 'cordis:status\t"' + project.status + '";\n\t'
     output = output + ('cordis:programme\t"' + self.setLiterals(project.programme) + '";\n\t' +
                        'cordis:frameworkProgramme\t"' + self.setLiterals(project.frameworkProgramme) + '";\n\t' +
                        'cordis:projectTopics\t"' + self.setLiterals(project.topics) + '";\n\t')
     if len(project.fundingScheme) > 1:
         output = output + 'cordis:projectFundingScheme\t"' + project.fundingScheme + '";\n\t'
     # Budgets: decimal comma -> decimal point, typed as dbpedia euro values.
     # Organization names become URI path segments, hence quote_plus.
     output = output + ('dbo:projectBudgetFunding\t' + project.budgetFunding.replace(',','.') + '^^<http://dbpedia.org/datatype/euro>;\n\t' +
                        'dbo:projectBudgetTotal\t' + project.budgetTotal.replace(',','.') +'^^<http://dbpedia.org/datatype/euro>;\n\t' +
                        'dbo:projectCoordinator\tcordis:organizations/' + parse.quote_plus(project.coordinator) + ';\n\t')
     if len(project.participants) > 1:
         for participant in project.participants:
             output = output + 'dbo:projectParticipant\tcordis:organizations/' + parse.quote_plus(participant) + ';\n\t'
     # Link host institutions / beneficiaries whose triple matches this project.
     for org in hostBene:
         if project.identifier == org[1]:
             if org[0] == 'hostInstitution':
                 output = output + 'cordis:hostInstitution\tcordis:organizations/' + parse.quote_plus(org[2]) + ';\n\t'
             elif org[0] == 'beneficiary':
                 output = output + 'cordis:beneficiary\tcordis:organizations/' + parse.quote_plus(org[2]) + ';\n\t'
     if len(project.subjects) > 1:
         for subject in project.subjects:
             output = output + 'cordis:projectSubject\t"' + subject + '";\n\t'
     # Final triple ends the resource description with " ."
     output = output + 'dbo:projectObjective\t"' + self.setLiterals(project.objective) + '";\n\t.\n\n'
     return output
Ejemplo n.º 3
0
    def get(self, entity, q=None, attributes=None, num=100, start=0, sortColumn=None, sortOrder=None):
        '''Retrieves entity rows from an entity repository.

        Args:
        entity -- fully qualified name of the entity
        q -- query in json form, see the MOLGENIS REST API v1 documentation for details
        attributes -- The list of attributes to retrieve
        num -- the amount of entity rows to retrieve
        start -- the index of the first row to retrieve (zero indexed)
        sortColumn -- the attribute to sort on
        sortOrder -- the order to sort in

        Examples:
        session.get('Person')
        '''
        # Shared query parameters for both request shapes.
        params = {"attributes": attributes, "num": num, "start": start,
                  "sortColumn": sortColumn, "sortOrder": sortOrder}
        if q:
            # POST with _method=GET so large query bodies are not constrained
            # by URL length limits.
            params["_method"] = "GET"
            response = self.session.post(
                self.url + "v1/" + quote_plus(entity).replace('%2F', '/'),
                headers=self._get_token_header_with_content_type(),
                params=params,
                data=json.dumps({"q": q}))
        else:
            response = self.session.get(
                self.url + "v1/" + quote_plus(entity),
                headers=self._get_token_header(),
                params=params)
        if response.status_code == 200:
            # Parse once instead of three times (the original re-parsed the
            # body for the membership test and both returns).
            body = response.json()
            return body["items"] if "items" in body else body
        # Non-200: raise for 4xx/5xx; other codes fall through to the caller.
        response.raise_for_status()
        return response
Ejemplo n.º 4
0
def getMapUrl(mapType, width, height, lat, lng, zoom):
    """Build a Google Static Maps URL showing markers for the loaded tweets.

    When a tweet is selected in the listbox it gets a red marker (coordinates
    if geotagged, otherwise its URL-encoded user location) and every other
    tweet gets a blue marker; with no selection all tweets are blue markers.
    """
    urlbase = "http://maps.google.com/maps/api/staticmap?"

    def _marker_suffix(tweet):
        # One "%7C"-prefixed marker location per tweet; the original code
        # repeated this branch three times.
        if tweet['coordinates'] is not None:
            coord = tweet['coordinates']['coordinates']
            # GeoJSON order is (lng, lat); the Maps API wants lat,lng.
            return "%7C{},{}".format(coord[1], coord[0])
        return "%7C{}".format(quote_plus(tweet['user']['location']))

    selection = tweetListBox.curselection()
    if len(selection) != 0:
        selected = selection[0]
        chosen = testtwitter.tweets[selected]
        if chosen['coordinates'] is not None:
            tweetCoord = chosen['coordinates']['coordinates']
            params = "maptype={}&center={},{}&zoom={}&size={}x{}&format=gif&markers=color:red%7C{},{}&markers=color:blue%7Csize:small".format(mapType, lat, lng, zoom, width, height, tweetCoord[1], tweetCoord[0])
        else:
            params = "maptype={}&center={},{}&zoom={}&size={}x{}&format=gif&markers=color:red%7C{}&markers=color:blue%7Csize:small".format(mapType, lat, lng, zoom, width, height, quote_plus(chosen['user']['location']))
        for index, tweet in enumerate(testtwitter.tweets):
            if index == selected:
                continue  # the selected tweet already has its red marker
            params += _marker_suffix(tweet)
    else:
        params = "maptype={}&center={},{}&zoom={}&size={}x{}&format=gif&markers=color:blue%7Csize:small".format(mapType, lat, lng, zoom, width, height)
        for tweet in testtwitter.tweets:
            params += _marker_suffix(tweet)
    return urlbase + params
Ejemplo n.º 5
0
    def setUp(self):
        """Create a user, a task and an OAuth2 password-grant application,
        then request an access token and build the auth headers used by the
        API tests."""
        self.user = User.objects.create_user('rulz', '*****@*****.**', '12345')
        self.task = Task.objects.create(
            name='tarea', description='description tarea', owner=self.user
        )

        # oauth2_provider: application used to issue tokens for the user that logs in
        self.application = Application(
            name="todo",
            user=self.user,
            client_type=Application.CLIENT_PUBLIC,
            authorization_grant_type=Application.GRANT_PASSWORD,
        )
        self.application.save()

        # NOTE(review): the credentials look scrubbed ('******'); the token
        # request presumably needs the real username/password from above.
        self.token_request_data = {
            'grant_type': 'password',
            'username': '******',
            'password': '******'
        }

        # urllib.quote_plus is the Python 2 location of this function --
        # confirm this test module runs under py2 (py3 uses urllib.parse).
        self.auth_headers = self.get_basic_auth_header(
            urllib.quote_plus(self.application.client_id),
            urllib.quote_plus(self.application.client_secret)
        )

        # Obtain a bearer token and expose it as ready-to-use request headers.
        self.response = self.client.post(reverse('oauth2_provider:token'),
                                         data=self.token_request_data, **self.auth_headers)
        content = json.loads(self.response.content.decode("utf-8"))
        self.headers = {'Authorization': 'Bearer %(token)s' % {'token': content['access_token']}}
Ejemplo n.º 6
0
def get_connection(host='localhost', port=27017, username=None, password=None,
                   uri=None, mongodb=None, authdb=None, timeout=20, *args, **kwargs):
    """Get a client to the mongo database

        host(str): Host of database
        port(int): Port of database
        username(str)
        password(str)
        uri(str)
        authdb (str): database to use for authentication
        timeout(int): How long should the client try to connect

    """
    authdb = authdb or mongodb
    # Bug fix: log_uri was never assigned when an explicit uri was passed,
    # causing a NameError at the LOG.info call below.
    log_uri = uri
    if uri is None:
        if username and password:
            uri = ("mongodb://{}:{}@{}:{}/{}"
                   .format(quote_plus(username), quote_plus(password), host, port, authdb))
            # Masked variant so credentials never reach the logs.
            log_uri = ("mongodb://{}:****@{}:{}/{}"
                       .format(quote_plus(username), host, port, authdb))
        else:
            log_uri = uri = "mongodb://%s:%s" % (host, port)

    LOG.info("Try to connect to %s" % log_uri)
    try:
        # NOTE(review): serverSelectionTimeoutMS is milliseconds; the default
        # timeout=20 therefore means 20ms, not 20s -- confirm intent.
        client = MongoClient(uri, serverSelectionTimeoutMS=timeout)
    except ServerSelectionTimeoutError as err:
        LOG.warning("Connection Refused")
        # Chain the original error so the root cause is preserved.
        raise ConnectionFailure from err

    LOG.info("Connection established")
    return client
Ejemplo n.º 7
0
 def _get_search_content(self, kind, ton, results):
     """Retrieve the web page for a given search.
     kind can be 'tt' (for titles), 'nm' (for names),
     'char' (for characters) or 'co' (for companies).
     ton is the title or the name to search.
     results is the maximum number of results to be retrieved."""
     # Python 2 code: `unicode` search terms are encoded to bytes before
     # URL-quoting; latin-1 is the fallback, and failures are ignored.
     if isinstance(ton, unicode):
         try:
             ton = ton.encode('utf-8')
         except Exception as e:
             try:
                 ton = ton.encode('iso8859-1')
             except Exception as e:
                 pass
     ##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
     params = 'q=%s&s=%s&mx=%s' % (quote_plus(ton), kind, str(results))
     # Episodes are searched as titles with an extra ttype filter.
     if kind == 'ep':
         params = params.replace('s=ep&', 's=tt&ttype=ep&', 1)
     cont = self._retrieve(self.urls['find'] % params)
     #print 'URL:', imdbURL_find % params
     # If the page is a normal result page (not the "too many matches"
     # variant), return it as-is.
     if cont.find('Your search returned more than') == -1 or \
             cont.find("displayed the exact matches") == -1:
         return cont
     # The retrieved page contains no results, because too many
     # titles or names contain the string we're looking for.
     # Retry with the "list" search form and a larger expected page size.
     params = 'q=%s&ls=%s&lm=0' % (quote_plus(ton), kind)
     size = 131072 + results * 512
     return self._retrieve(self.urls['find'] % params, size=size)
Ejemplo n.º 8
0
    def test_sasl_plain(self):
        """Integration test: SASL PLAIN auth against a live LDAP-backed mongo,
        via explicit authenticate(), via credentials in the URI, and (when the
        server is a replica set) via a replicaSet connection."""

        # Direct connection + explicit authenticate() call.
        client = MongoClient(SASL_HOST, SASL_PORT)
        self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS,
                                                 SASL_DB, 'PLAIN'))
        client.ldap.test.find_one()

        # Same auth expressed in the connection URI; credentials must be
        # percent-encoded to survive URI parsing.
        uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;'
               'authSource=%s' % (quote_plus(SASL_USER),
                                  quote_plus(SASL_PASS),
                                  SASL_HOST, SASL_PORT, SASL_DB))
        client = MongoClient(uri)
        client.ldap.test.find_one()

        # Repeat both styles with replicaSet when the server reports one.
        set_name = client.admin.command('ismaster').get('setName')
        if set_name:
            client = MongoClient(SASL_HOST,
                                 port=SASL_PORT,
                                 replicaSet=set_name)
            self.assertTrue(client.ldap.authenticate(SASL_USER, SASL_PASS,
                                                     SASL_DB, 'PLAIN'))
            client.ldap.test.find_one()

            uri = ('mongodb://%s:%s@%s:%d/?authMechanism=PLAIN;'
                   'authSource=%s;replicaSet=%s' % (quote_plus(SASL_USER),
                                                    quote_plus(SASL_PASS),
                                                    SASL_HOST, SASL_PORT,
                                                    SASL_DB, str(set_name)))
            client = MongoClient(uri)
            client.ldap.test.find_one()
Ejemplo n.º 9
0
    def search(self, query):
        """Execute *query* against the API and return the parsed Result.

        Only parameters with a truthy value are sent; both names and values
        are URL-encoded.
        """
        self.query = query
        hparams = {
            'q': self.query,
            'group': self.group,
            'nresults': self.nresults,
            'start': self.start,
            'match':self.match,
            'nocollapse': 'true' if self.nocollapse else 'false',
            'nfo': 'true' if self.nfo else 'false',
            'passwd': 'true' if self.passwd else 'false',
            'complete': self.complete,
            'key': Request.key
        }
        query_parts = ['%s=%s' % (quote_plus(str(name)), quote_plus(str(value)))
                       for name, value in hparams.items() if value]
        url = '%s?%s' % (Request.url, '&'.join(query_parts))

        print(url)
        payload = urlopen(url).read().decode('utf-8')
        return Result(json.loads(payload))
Ejemplo n.º 10
0
def create_linkedin_button(url, quote, hashtags, config):
    """Render the LinkedIn share snippet for *url*.

    *hashtags* is accepted for signature parity with the other share buttons
    but is not used by LinkedIn links.
    """
    snippet = config['snippet_linkedin']
    return snippet.format(
        url=url,
        urlq=quote_plus(url),
        quote=quote_plus(quote.encode('utf-8')),
        icon_linkedin=config['icon_linkedin'],
    )
Ejemplo n.º 11
0
    def instance_url(self):
        """Return the REST path for this card, scoped to its owner.

        The owner is whichever of customer / recipient / account is present;
        raises InvalidRequestError when none is attached.
        """
        card_segment = quote_plus(self.id)

        if hasattr(self, "customer"):
            base = Customer.class_url()
            owner_segment = quote_plus(self.customer)
            collection = "sources"
        elif hasattr(self, "recipient"):
            base = Recipient.class_url()
            owner_segment = quote_plus(self.recipient)
            collection = "cards"
        elif hasattr(self, "account"):
            base = Account.class_url()
            owner_segment = quote_plus(self.account)
            collection = "external_accounts"
        else:
            raise error.InvalidRequestError(
                "Could not determine whether card_id %s is attached to a customer, " "recipient, or account." % self.id,
                "id",
            )

        return "%s/%s/%s/%s" % (base, owner_segment, collection, card_segment)
Ejemplo n.º 12
0
    def url(self):
        """
Returns the URL used for all subsequent requests.

:return: (str) URL to be called
:since:  v1.0.0
        """

        _return = "{0}://".format(self.scheme)

        if (self.auth_username is not None or self.auth_password is not None):
            if (self.auth_username is not None): _return += quote_plus(self.auth_username)
            _return += ":"
            if (self.auth_password is not None): _return += quote_plus(self.auth_password)
            _return += "@"
        #

        _return += self.host

        if ((self.scheme != "https" or self.port != http_client.HTTPS_PORT)
                and (self.scheme != "http" or self.port != http_client.HTTP_PORT)
        ): _return += ":{0:d}".format(self.port)

        _return += self.path

        return _return
Ejemplo n.º 13
0
def create_facebook_button(url, quote, hashtags, config):
    """Render the Facebook share snippet for *url*, appending the formatted
    hashtags to the quoted text."""
    share_text = quote + format_hashtags(hashtags)
    return config['snippet_facebook'].format(
        url=url,
        urlq=quote_plus(url),
        quote=quote_plus(share_text.encode('utf-8')),
        icon_facebook=config['icon_facebook'],
    )
Ejemplo n.º 14
0
	def validate(self,dic):
		"""Verify a Steam OpenID response by replaying its signed fields to
		steamcommunity.com with mode=check_authentication.

		Returns the claimed_id on success, False otherwise.
		"""
		params = {}

		openid_endpoint = dic["openid.op_endpoint"]
		# Echo back every field listed in openid.signed, URL-encoded.
		signed = dic["openid.signed"].split(",")
		for item in signed:
			params["openid."+item] = parse.quote_plus(dic["openid."+item])
		params['openid.mode'] = 'check_authentication'

		params["openid.assoc_handle"] = dic['openid.assoc_handle'] 
		params["openid.signed"] = dic['openid.signed']
		params["openid.sig"] = parse.quote_plus(dic['openid.sig'])
		params["openid.ns"] = 'http://specs.openid.net/auth/2.0'

		# Hand-built query string; the trailing '&' is stripped afterwards.
		data = "?"
		for key, value in params.items():
			data += key+"="+value+"&"
		data = data[:-1]
		print("https://steamcommunity.com/openid/login"+data)
		check = requests.get("https://steamcommunity.com/openid/login"+data)
		# NOTE: str(bytes) yields "b'...'", so the body's lines are split on
		# the literal two-character sequence "\n" -- line 1 is "is_valid:...".
		check = str(check.content)
		if check.split("\\n")[1] == "is_valid:true":
			return dic["openid.claimed_id"]
		else:
			return False
Ejemplo n.º 15
0
def get_games_with_system(search, system):
    """Search TheGamesDB for *search* on the given *system*.

    Returns a list of SearchResult ordered by match quality (exact-title and
    substring matches rank higher), or None when the lookup fails.
    """
    scraper_sysid = __scrapermap__[system]
    user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31'
    results = []
    try:
        def _fetch(platform):
            # One GetGamesList request; newlines are stripped so the regex
            # below can match records that span lines.
            req = request.Request('http://thegamesdb.net/api/GetGamesList.php?name=' + parse.quote_plus(search) + '&platform=' + parse.quote_plus(platform))
            req.add_unredirected_header('User-Agent', user_agent)
            return request.urlopen(req).read().decode('utf-8').replace('\n', '')

        page = _fetch(scraper_sysid)
        if system == "Sega Genesis":
            # Same console under its PAL name. Bug fix: the original appended
            # the raw bytes of the second response without decoding, which
            # raises TypeError on str.replace in Python 3.
            page = page + _fetch('Sega Mega Drive')
        games = re.findall("<Game><id>(.*?)</id><GameTitle>(.*?)</GameTitle>(.*?)<Platform>(.*?)</Platform></Game>",
                           page)
        for item in games:
            game = {}
            game["id"] = item[0]
            game["title"] = item[1]
            game["system"] = item[3]
            game["order"] = 1
            # Boost exact and substring title matches.
            if game["title"].lower() == search.lower():
                game["order"] += 1
            if game["title"].lower().find(search.lower()) != -1:
                game["order"] += 1
            if game["system"] == scraper_sysid:
                game["system"] = system
                results.append(game)
        results.sort(key=lambda result: result["order"], reverse=True)
        return [SearchResult(result["title"], result["id"]) for result in results]
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt);
        # any scrape failure still degrades to "no results".
        return None
Ejemplo n.º 16
0
 def update(self, queue, subscription_id, project=None, **kwargs):
     """Update a subscription stored as Swift objects.

     The current subscription data is fetched, merged with **kwargs, and
     rewritten with the remaining TTL. When 'subscriber' changes, the
     subscriber index object is recreated first (failing with
     SubscriptionAlreadyExists if the new subscriber is taken) and the old
     index object is deleted.
     """
     container = utils._subscription_container(queue, project)
     data = self.get(queue, subscription_id, project)
     # 'age' is derived, not stored; drop it before writing back.
     data.pop('age')
     ttl = data['ttl']
     if 'subscriber' in kwargs:
         sub_container = utils._subscriber_container(queue, project)
         try:
             # if-none-match: '*' makes the PUT fail (412) when an object
             # for the new subscriber already exists.
             self._client.put_object(
                 sub_container,
                 quote_plus(kwargs['subscriber']),
                 contents=subscription_id,
                 headers={'x-delete-after': ttl, 'if-none-match': '*'})
         except swiftclient.ClientException as exc:
             if exc.http_status == 412:
                 raise errors.SubscriptionAlreadyExists()
             raise
         # Only after the new index exists is the old one removed.
         self._client.delete_object(sub_container,
                                    quote_plus(data['subscriber']))
     data.update(kwargs)
     # Rewrite the subscription object with the merged data and same TTL.
     self._client.put_object(container,
                             subscription_id,
                             contents=jsonutils.dumps(data),
                             content_type='application/json',
                             headers={'x-delete-after': ttl})
Ejemplo n.º 17
0
 def set_ethernet_if(self, cmd_list, if_id,
                     ip_address, descr):
     """Append the API calls that configure an ethernet interface's address
     and description to *cmd_list*."""
     if_name = self.get_interface()
     address_path = "interfaces/{0}/{1}/address/{2}".format(if_name, if_id, quote_plus(ip_address))
     description_path = "interfaces/{0}/{1}/description/{2}".format(if_name, if_id, quote_plus(descr))
     cmd_list.append(VyattaSetAPICall(address_path))
     cmd_list.append(VyattaSetAPICall(description_path))
Ejemplo n.º 18
0
    def _createExtruderQualityChangesForSingleExtrusionMachine(self, filename, global_quality_changes):
        """Create the per-extruder quality_changes .inst.cfg companion file for
        a single-extrusion machine's global quality_changes profile."""
        suffix = "_" + quote_plus(global_quality_changes["general"]["name"].lower())
        # Fix: os.path.os.path.basename was a redundant double attribute hop;
        # os.path.basename is the same function.
        machine_name = os.path.basename(filename).replace(".inst.cfg", "").replace(suffix, "")

        # Why is this here?!
        # When we load a .curaprofile file the deserialize will trigger a version upgrade, creating a dangling file.
        # This file can be recognized by its lack of a machine name in the target filename.
        # So when we detect that situation here, we don't create the file and return.
        if machine_name == "":
            return

        new_filename = machine_name + "_" + "fdmextruder" + suffix

        # Interpolation disabled: profile values may contain '%' characters.
        extruder_quality_changes_parser = configparser.ConfigParser(interpolation = None)
        extruder_quality_changes_parser.add_section("general")
        extruder_quality_changes_parser["general"]["version"] = str(2)
        extruder_quality_changes_parser["general"]["name"] = global_quality_changes["general"]["name"]
        extruder_quality_changes_parser["general"]["definition"] = global_quality_changes["general"]["definition"]

        extruder_quality_changes_parser.add_section("metadata")
        extruder_quality_changes_parser["metadata"]["quality_type"] = global_quality_changes["metadata"]["quality_type"]
        extruder_quality_changes_parser["metadata"]["type"] = global_quality_changes["metadata"]["type"]
        extruder_quality_changes_parser["metadata"]["setting_version"] = str(4)
        extruder_quality_changes_parser["metadata"]["extruder"] = "fdmextruder"

        # Serialize to a string buffer, then write under the quality-changes dir.
        extruder_quality_changes_output = io.StringIO()
        extruder_quality_changes_parser.write(extruder_quality_changes_output)
        extruder_quality_changes_filename = quote_plus(new_filename) + ".inst.cfg"

        quality_changes_dir = Resources.getPath(CuraApplication.ResourceTypes.QualityInstanceContainer)

        with open(os.path.join(quality_changes_dir, extruder_quality_changes_filename), "w") as f:
            f.write(extruder_quality_changes_output.getvalue())
    def _build_request_parameters(self, params = None, separator = ";"):
        """
Build a HTTP query string based on the given parameters and the separator.

:param params: Query parameters as dict
:param separator: Query parameter separator

:return: (mixed) Response data; Exception on error
:since:  v1.0.0
        """

        _return = None

        if (isinstance(params, dict)):
            params_list = [ ]

            for key in params:
                if (type(params[key]) is not bool): params_list.append("{0}={1}".format(quote_plus(str(key), ""), quote_plus(str(params[key]), "")))
                elif (params[key]): params_list.append("{0}=1".format(quote_plus(str(key), "")))
                else: params_list.append("{0}=0".format(quote_plus(str(key), "")))
            #

            _return = separator.join(params_list)
        #

        return _return
Ejemplo n.º 20
0
def series_api(key, value=None):
	"""Query the series API with the URL-encoded *key* (optionally "key=value")
	and return the parsed series listing."""
	query = quote_plus(key)
	if value is not None:
		query = "{0}={1}".format(query, quote_plus(value))
	url = urljoin(iview_config['api_url'], '?' + query)
	return parser.parse_series_api(maybe_fetch(url))
Ejemplo n.º 21
0
    def test_organization_get_by_identifier(self):
        """Organizations must be retrievable by (system, value) identifier via
        both the search endpoint and the value-path endpoint."""
        org_id_system = "http://test/system"
        org_id_value = "testval"
        self.login()
        # Persist the org and identifier first; the link row references both
        # by id, hence the second commit.
        org = Organization(name='test', id=999)
        ident = Identifier(id=99, system=org_id_system, value=org_id_value)
        org_ident = OrganizationIdentifier(
            organization_id=999, identifier_id=99)
        with SessionScope(db):
            db.session.add(org)
            db.session.add(ident)
            db.session.commit()
            db.session.add(org_ident)
            db.session.commit()

        # use api to obtain FHIR; the system URL must be percent-encoded
        response = self.client.get(
            '/api/organization?system={system}&value={value}'.format(
                system=quote_plus(org_id_system), value=org_id_value))
        assert response.status_code == 200
        assert response.json['total'] == 1
        assert response.json['entry'][0]['id'] == 999

        # use alternative API to obtain organization
        response = self.client.get(
            '/api/organization/{value}?system={system}'.format(
                system=quote_plus(org_id_system), value=org_id_value))
        assert response.status_code == 200
        fetched = Organization.from_fhir(response.json)
        # Re-attach org to the current session before comparing fields.
        org = db.session.merge(org)
        assert org.id == fetched.id
        assert org.name == fetched.name
Ejemplo n.º 22
0
 def delete(self, entity, id):
     """Deletes a single entity row from an entity repository."""
     endpoint = self.url + "v1/" + quote_plus(entity) + "/" + quote_plus(id)
     response = self.session.delete(endpoint, headers=self._get_token_header())
     response.raise_for_status()
     return response
Ejemplo n.º 23
0
    def get_pinterets_share_link(self,
                                 URL: str,
                                 media: str,
                                 description: str="",
                                 **kwargs) -> str:
        """
        Creates Pinterest share (pin) link with the UTM parameters.

        Arguments:
            URL -- Link that you want to share.
            media -- Media URL to pin.
            description (optional) -- Describe your pin.

        Keyword Arguments:
            You can pass query string parameters as keyword arguments.
            Example: utm_source, utm_medium, utm_campaign etc...

        Returns:
            URL -- Pinterest share (pin) link for the URL.
        """

        # Default attribution when the caller did not set one.
        kwargs.setdefault("utm_source", "pinterest")

        template = ("http://pinterest.com/pin/create/button/"
                    "?media={media}&description={description}&url={URL}?{args}")
        return template.format(
            URL=quote_plus(URL),
            media=quote_plus(media),
            description=quote_plus(description),
            args=quote_plus(urlencode(kwargs)),
        )
Ejemplo n.º 24
0
def print_conf(c):
    """Pretty-print the stored JSON configuration addressed by an
    '<issuer>][<tag>' key under the 'entities' directory."""
    iss, tag = c.split('][', 2)
    # Normalize then re-quote each part so the filename matches how entries
    # were written, regardless of the input's encoding state.
    fname = os.path.join('entities', quote_plus(unquote_plus(iss)), quote_plus(unquote_plus(tag)))
    # Context manager fixes the original's leaked file handle (open().read()).
    with open(fname, 'r') as fp:
        cnf = json.load(fp)
    print(">>>", fname)
    print(json.dumps(cnf, sort_keys=True, indent=2,
                     separators=(',', ': ')))
Ejemplo n.º 25
0
def main():
    """Bulk-import monthly Github-archive partitions from S3 into the Crate
    'github' table.

    Returns 0 on success, 1 when the required AWS credentials are missing
    from the environment.
    """
    try:
        aws_secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
        aws_access_key = os.environ['AWS_ACCESS_KEY_ID']
    except KeyError:
        print("Please set your AWS_SECRET_ACCESS_KEY and AWS_ACCESS_KEY_ID environment variables.")
        return 1

    args = parse_args()

    connection = client.connect(args.host, error_trace=True)
    cur = connection.cursor()

    create_table(cur, os.path.join(os.path.dirname(__file__), "..", "schema.sql"))
    # NOTE(review): alter_table(cur, 0)/(cur, 1) presumably toggles a table
    # setting off during the bulk load and back on afterwards -- confirm.
    alter_table(cur, 0)

    for month in get_month_partitions(args.start, args.end):
        print('Importing Github data for {0} ...'.format(month))
        # Credentials are percent-encoded so special characters survive the
        # S3 URI. NOTE: the URL (including the encoded secret) is printed.
        s3_url = 's3://{0}:{1}@crate.sampledata/github_archive/{2}-*'.format(quote_plus(aws_access_key),
            quote_plus(aws_secret_key), month)
        print('>>> {0}'.format(s3_url))
        cmd = '''COPY github PARTITION (month_partition=?) FROM ? WITH (compression='gzip')'''
        try:
            cur.execute(cmd, (month, s3_url,))
        except Exception as e:
            # NOTE(review): assumes the crate client exception exposes
            # error_trace (enabled via error_trace=True above).
            print("Error while importing {}: {}".format(s3_url, e))
            print(e.error_trace)

    alter_table(cur, 1)
    return 0
Ejemplo n.º 26
0
def make_webenv(config, rest):
    """Assemble the application argument dict for the RP test web environment.

    Reads the module-level `args` namespace (tag, issuer, port, insecure) and
    several module globals (oper, func, profiles, ...); exits the process when
    the entity configuration cannot be constructed.
    """
    # Issuer/tag address the entity configuration; both are URL-quoted so
    # they can be used as path/file components.
    if args.tag:
        qtag = quote_plus(args.tag)
    else:
        qtag = 'default'

    ent_conf = None
    try:
        ent_conf = rest.construct_config(quote_plus(args.issuer), qtag)
    except Exception as err:
        # Dump the traceback for diagnosis, then abort -- the environment is
        # unusable without an entity configuration.
        print('iss:{}, tag:{}'.format(quote_plus(args.issuer), qtag))
        for m in traceback.format_exception(*sys.exc_info()):
            print(m)
        exit()

    setup_logging("%s/rp_%s.log" % (SERVER_LOG_FOLDER, args.port), logger)
    logger.info('construct_app_args')

    _path, app_args = construct_app_args(args, config, oper, func, profiles,
                                         ent_conf)

    # Application arguments
    app_args.update(
        {"msg_factory": oic_message_factory, 'profile_map': PROFILEMAP,
         'profile_handler': ProfileHandler,
         'client_factory': Factory(Client)})

    # Optionally disable TLS verification for test setups.
    if args.insecure:
        app_args['client_info']['verify_ssl'] = False

    return app_args
Ejemplo n.º 27
0
    def put(self, key, value, cache=None, options=None):
        """Query the server to set the key specified to the value specified in
        the specified cache.

        Keyword arguments:
        key -- the name of the key to be set. Required.
        value -- the value to set key to. Must be a string or JSON
                 serialisable. Required.
        cache -- the cache to store the item in. Defaults to None, which uses
                 self.name. If no name is set, raises a ValueError.
        options -- a dict of arguments to send with the request. Defaults to
                   None (treated as an empty dict). See
                   http://dev.iron.io/cache/reference/api/#put_item for more
                   information on defaults and possible values.
        """
        if cache is None:
            cache = self.name
        if cache is None:
            raise ValueError("Cache name must be set")

        # Non-string, non-integer values are JSON-serialized before upload.
        if not isinstance(value, six.string_types) and not isinstance(value,
                six.integer_types):
            value = json.dumps(value)

        # Copy the caller's dict instead of mutating it; this also fixes the
        # shared-mutable-default-argument bug (the signature had options={}
        # and then wrote options["body"], leaking state across calls).
        options = dict(options) if options else {}
        options["body"] = value
        body = json.dumps(options)

        # Both path components must be URL-safe.
        cache = quote_plus(cache)
        key = quote_plus(key)

        result = self.client.put("caches/%s/items/%s" % (cache, key), body,
                {"Content-Type": "application/json"})
        return Item(cache=cache, key=key, value=value)
Ejemplo n.º 28
0
 def url(self):
     """Build the GitHub trending URL, adding language and time-span filters
     only when they are set."""
     filters = []
     if self.language is not None:
         filters.append("l=" + quote_plus(self.language))
     if self.since is not None:
         filters.append("since=" + quote_plus(self.since))
     return "https://github.com/trending?" + "&".join(filters)
Ejemplo n.º 29
0
def _get_bearer_token(key, secret):
    """
    OAuth2 function using twitter's "Application Only" auth method.
    With key and secret, make a POST request to get a bearer token that is used in future API calls.

    Raises OAuthError when the response is not a proper bearer token.
    """

    # Basic auth credentials: "key:secret", percent-encoded then base64'd.
    creds = parse.quote_plus(key) + ':' + parse.quote_plus(secret)
    encoded_creds = base64.b64encode(creds.encode('ascii'))

    all_headers = {
        "Authorization": "Basic " + encoded_creds.decode(encoding='UTF-8'),
        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8",
        "User-Agent": API_USER_AGENT,
    }

    body_content = {
        'grant_type': 'client_credentials'
    }

    resp = requests.post(
        'https://api.twitter.com/oauth2/token',
        data=body_content,
        headers=all_headers
    )

    # Renamed from `json` so the local no longer shadows the json module
    # within this function.
    payload = resp.json()

    if payload['token_type'] != 'bearer' or 'access_token' not in payload:
        raise OAuthError("Did not receive proper bearer token on initial POST")

    return payload['access_token']
Ejemplo n.º 30
0
 def get_attribute_meta_data(self, entity, attribute):
     """Retrieves the metadata for a single attribute of an entity repository."""
     response = self.session.get(
         self.url + "v1/" + quote_plus(entity) + "/meta/" + quote_plus(attribute), headers=self._get_token_header()
     )
     # Bug fix: raise_for_status() belongs on the Response object; the
     # original called .json() first and then raise_for_status() on the
     # resulting dict, which raises AttributeError on every call.
     response.raise_for_status()
     return response.json()
Ejemplo n.º 31
0
 def excel_get_charts(self, item_id, worksheet_id, params=None, **kwargs):
     """Return the charts of a workbook worksheet via the Microsoft Graph beta API."""
     endpoint = ("https://graph.microsoft.com/beta/me/drive/items/{0}"
                 "/workbook/worksheets/{1}/charts").format(item_id, quote_plus(worksheet_id))
     return self._get(endpoint, params=params, **kwargs)
HOME_DIR = expanduser("~")


def get_absolute_path(path):
    """
    Returns absolute path.

    Relative paths are resolved against the user's home directory.
    """
    # os.path.isabs is portable; the original startswith("/") check only
    # recognised POSIX-style absolute paths (e.g. not "C:\\..." on Windows).
    if os.path.isabs(path):
        return path
    return os.path.join(HOME_DIR, path)


# Connection parameters used to build the Cloud SQL PostgreSQL query URI.
# Every interpolated value is percent-encoded with quote_plus so special
# characters survive URI parsing; the public port is passed through as-is.
# Certificate/key paths are resolved relative to the home directory first.
postgres_kwargs = dict(
    user=quote_plus(GCSQL_POSTGRES_USER),
    password=quote_plus(GCSQL_POSTGRES_PASSWORD),
    public_port=GCSQL_POSTGRES_PUBLIC_PORT,
    public_ip=quote_plus(GCSQL_POSTGRES_PUBLIC_IP),
    project_id=quote_plus(GCP_PROJECT_ID),
    location=quote_plus(GCP_REGION),
    instance=quote_plus(GCSQL_POSTGRES_INSTANCE_NAME_QUERY),
    database=quote_plus(GCSQL_POSTGRES_DATABASE_NAME),
    client_cert_file=quote_plus(
        get_absolute_path(GCSQL_POSTGRES_CLIENT_CERT_FILE)),
    client_key_file=quote_plus(
        get_absolute_path(GCSQL_POSTGRES_CLIENT_KEY_FILE)),
    server_ca_file=quote_plus(
        get_absolute_path(GCSQL_POSTGRES_SERVER_CA_FILE)),
)
Ejemplo n.º 33
0
def installed_build_check():
    """Post-install sanity check for a freshly installed Kodi build.

    Three outcomes: (1) extraction stopped short of 100% with errors
    recorded -> offer a fresh reinstall; (2) a stock Kodi skin is active
    after the install (the build's skin did not apply) -> restore the
    default skin and offer the build's GUI fix; (3) everything looks fine.
    Afterwards, Trakt / Real-Debrid / login data are restored if the user
    chose to keep them.  User-facing dialog strings are in Portuguese.
    """
    dialog = xbmcgui.Dialog()

    # Case 1: archive did not extract fully and errors were counted.
    if not CONFIG.EXTRACT == '100' and CONFIG.EXTERROR > 0:
        logging.log(
            "[Build Installed Check] Build foi extraído {0}/100 with [ERROS: {1}]"
            .format(CONFIG.EXTRACT, CONFIG.EXTERROR),
            level=xbmc.LOGINFO)
        yes = dialog.yesno(
            CONFIG.ADDONTITLE,
            '[COLOR {0}]{2}[/COLOR] [COLOR {1}]não foi instalado corretamente![/COLOR]'
            .format(CONFIG.COLOR1, CONFIG.COLOR2, CONFIG.BUILDNAME) + '\n' +
            ('Instalado: [COLOR {0}]{1}[/COLOR] / '
             'Error Count: [COLOR {2}]{3}[/COLOR]').format(
                 CONFIG.COLOR1, CONFIG.EXTRACT, CONFIG.COLOR1, CONFIG.EXTERROR)
            + '\n' + 'Você gostaria de tentar de novo?[/COLOR]',
            nolabel='[B]Não, obrigado![/B]',
            yeslabel='[B]Tentar novamente[/B]')
        # Clear stored build state regardless of the user's answer.
        CONFIG.clear_setting('build')
        if yes:
            # Re-launch the installer plugin with the same build name.
            xbmc.executebuiltin(
                "PlayMedia(plugin://{0}/?mode=install&name={1}&url=fresh)".
                format(CONFIG.ADDON_ID, quote_plus(CONFIG.BUILDNAME)))
            logging.log("[Build Installed Check] Nova instalação reset",
                        level=xbmc.LOGINFO)
        else:
            logging.log("[Build Installed Check] Reinstall Ignored")
    # Case 2: one of the stock skins is active -> build skin not applied.
    elif CONFIG.SKIN in ['skin.confluence', 'skin.estuary', 'skin.estouchy']:
        logging.log("[Build Installed Check] Incorrect skin: {0}".format(
            CONFIG.SKIN),
                    level=xbmc.LOGINFO)
        defaults = CONFIG.get_setting('defaultskin')
        # Restore the saved default skin and its look-and-feel data, if any.
        if not defaults == '':
            if os.path.exists(os.path.join(CONFIG.ADDONS, defaults)):
                if skin.skin_to_default(defaults):
                    skin.look_and_feel_data('restore')
        if not CONFIG.SKIN == defaults and not CONFIG.BUILDNAME == "":
            gui_xml = check.check_build(CONFIG.BUILDNAME, 'gui')

            # A reachable GUI-fix URL means we can offer to apply it.
            response = tools.open_url(gui_xml, check=True)
            if not response:
                logging.log(
                    "[Build Installed Check] Guifix was set to http://",
                    level=xbmc.LOGINFO)
                dialog.ok(
                    CONFIG.ADDONTITLE,
                    "[COLOR {0}]Parece que as configurações de skin não foram aplicadas à build."
                    .format(CONFIG.COLOR2) + '\n' +
                    "Infelizmente, nenhuma correção de interface foi anexada a build"
                    + '\n' +
                    "Você precisará reinstalar a compilação e certificar-se de fazer um fechamento forçado[/COLOR]"
                )
            else:
                yes = dialog.yesno(
                    CONFIG.ADDONTITLE,
                    '{0} não foi instalado corretamente!'.format(
                        CONFIG.BUILDNAME) + '\n' +
                    'Parece que as configurações de skin não foram aplicadas à build.'
                    + '\n' + 'Você gostaria de aplicar o GuiFix?',
                    nolabel='[B]Não, Cancelar[/B]',
                    yeslabel='[B]Aplicar Fix[/B]')
                if yes:
                    # Launch the installer plugin in GUI-fix mode.
                    xbmc.executebuiltin(
                        "PlayMedia(plugin://{0}/?mode=install&name={1}&url=gui)"
                        .format(CONFIG.ADDON_ID, quote_plus(CONFIG.BUILDNAME)))
                    logging.log(
                        "[Build Installed Check] Guifix tentando instalar")
                else:
                    logging.log(
                        '[Build Installed Check] Guifix url funcionando, mas cancelado: {0}'
                        .format(gui_xml),
                        level=xbmc.LOGINFO)
    # Case 3: extraction complete and a non-stock skin is active.
    else:
        logging.log(
            '[Build Installed Check] A instalação parece ter sido concluída corretamente',
            level=xbmc.LOGINFO)

    # Restore user data the install was asked to keep (lazy imports so the
    # helper modules only load when actually needed).
    if CONFIG.get_setting('installed') == 'true':
        if CONFIG.get_setting('keeptrakt') == 'true':
            from resources.libs import traktit
            logging.log('[Build Installed Check] Restoring Trakt Data',
                        level=xbmc.LOGINFO)
            traktit.trakt_it('restore', 'all')
        if CONFIG.get_setting('keepdebrid') == 'true':
            from resources.libs import debridit
            logging.log('[Build Installed Check] Restoring Real Debrid Data',
                        level=xbmc.LOGINFO)
            debridit.debrid_it('restore', 'all')
        if CONFIG.get_setting('keeplogin') == 'true':
            from resources.libs import loginit
            logging.log('[Build Installed Check] Restoring Login Data',
                        level=xbmc.LOGINFO)
            loginit.login_it('restore', 'all')

        CONFIG.clear_setting('install')
Ejemplo n.º 34
0
 def gmail_url(self):
     """Deep link to this message in Gmail via an rfc822msgid search.

     The leading ':' is part of the quoted search term, so the final URL
     reads '...rfc822msgid%3A<encoded-id>'.
     """
     encoded_term = quote_plus(f':{self.message_id}')
     return 'https://mail.google.com/mail/u/0/#search/rfc822msgid' + encoded_term
Ejemplo n.º 35
0
 def excel_update_range(self, item_id, worksheets_id, **kwargs):
     """PATCH the fixed range A1:B2 of a worksheet in workbook *item_id*."""
     endpoint = (
         "https://graph.microsoft.com/beta/me/drive/items/{0}"
         "/workbook/worksheets/{1}/range(address='A1:B2')"
     ).format(item_id, quote_plus(worksheets_id))
     return self._patch(endpoint, **kwargs)
Ejemplo n.º 36
0
 def excel_add_row(self, item_id, worksheets_id, table_id, **kwargs):
     """POST a new row to *table_id* on a worksheet of workbook *item_id*."""
     # NOTE(review): table_id is interpolated without quote_plus, unlike the
     # worksheet id — confirm table ids can never contain URL-unsafe chars.
     endpoint = (
         "https://graph.microsoft.com/beta/me/drive/items/{0}"
         "/workbook/worksheets/{1}/tables/{2}/rows"
     ).format(item_id, quote_plus(worksheets_id), table_id)
     return self._post(endpoint, **kwargs)
Ejemplo n.º 37
0
 def excel_add_chart(self, item_id, worksheet_id, **kwargs):
     """POST a new chart onto a worksheet of workbook *item_id*."""
     endpoint = (
         "https://graph.microsoft.com/beta/me/drive/items/{0}"
         "/workbook/worksheets/{1}/charts/add"
     ).format(item_id, quote_plus(worksheet_id))
     return self._post(endpoint, **kwargs)
Ejemplo n.º 38
0
    def encoded_request(self):
        """Serialize the built query element to XML and URL-encode it."""
        xml_prolog = '<?xml version="1.0" encoding="UTF-8" ?><!DOCTYPE Query>'
        xml_body = tostring(self._query, 'utf-8').decode('utf-8')
        return quote_plus(xml_prolog + xml_body)
Ejemplo n.º 39
0
def statusCache(guid):
    """Build the deviceStatus/now URL for *guid*.

    The guid is plus-quoted, then any encoded slash ('%2F', case-insensitive)
    is collapsed to a literal 'f' — presumably the service's cache-key
    convention; confirm against the API before changing.
    """
    encoded_guid = re.sub('(?i)\%2f', 'f', quote_plus(guid))
    return '{base_url}/deviceStatus/now/{guid}'.format(base_url=BASE_URL,
                                                       guid=encoded_guid)
Ejemplo n.º 40
0
 def excel_get_specific_worksheet(self, item_id, worksheet_id, **kwargs):
     """GET a single worksheet of workbook *item_id* by its id."""
     endpoint = (
         "https://graph.microsoft.com/beta/me/drive/items/{0}"
         "/workbook/worksheets/{1}"
     ).format(item_id, quote_plus(worksheet_id))
     return self._get(endpoint, **kwargs)
Ejemplo n.º 41
0
def file_uri(path, brackets=True):
    """Quote *path* after its first character, or wrap it in parentheses.

    With brackets=True the first char (typically the leading '/') is kept
    verbatim and the remainder is plus-quoted; otherwise the whole path is
    returned inside '(...)' unquoted.
    """
    if not brackets:
        return "({0})".format(path)
    head, tail = path[0], path[1:]
    return "{0}{1}".format(head, quote_plus(tail))
Ejemplo n.º 42
0
 def get_link(self, operator, dttm):
     """Return the S3 log URL for *operator* at execution time *dttm*."""
     url_template = "https://s3.amazonaws.com/airflow-logs/{dag_id}/{task_id}/{execution_date}"
     return url_template.format(
         dag_id=operator.dag_id,
         task_id=operator.task_id,
         execution_date=quote_plus(dttm.isoformat()),
     )
Ejemplo n.º 43
0
    def test_simple_http_resolver(self):
        """End-to-end test of SimpleHTTPResolver: config validation,
        attribute defaults, cache-path layout, and failure modes."""

        self._mock_urls()

        # First we test with no config...
        config = {}
        self.assertRaises(ResolverException,
                          lambda: SimpleHTTPResolver(config))

        # Then we test missing source_prefix and uri_resolvable
        config = {'cache_root': self.SRC_IMAGE_CACHE}
        self.assertRaises(ResolverException,
                          lambda: SimpleHTTPResolver(config))

        # Then we test with the full config...
        #TODO: More granular testing of these settings...
        config = {
            'cache_root': self.SRC_IMAGE_CACHE,
            'source_prefix': 'http://www.mysite/',
            'source_suffix': '/accessMaster',
            'default_format': 'jp2',
            'head_resolvable': True,
            'uri_resolvable': True,
            # NOTE(review): '******' looks like a censored paste of
            # 'TestUser' — the assertion below expects 'TestUser'; confirm
            # against the original test source.
            'user': '******',
            'pw': 'TestPW',
        }

        self.app.resolver = SimpleHTTPResolver(config)
        self.assertEqual(self.app.resolver.source_prefix, 'http://www.mysite/')
        self.assertEqual(self.app.resolver.source_suffix, '/accessMaster')
        self.assertEqual(self.app.resolver.default_format, 'jp2')
        self.assertEqual(self.app.resolver.head_resolvable, True)
        self.assertEqual(self.app.resolver.uri_resolvable, True)
        self.assertEqual(self.app.resolver.user, 'TestUser')
        self.assertEqual(self.app.resolver.pw, 'TestPW')

        # Then we test with a barebones default config...
        config = {'cache_root': self.SRC_IMAGE_CACHE, 'uri_resolvable': True}

        self.app.resolver = SimpleHTTPResolver(config)
        self.assertEqual(self.app.resolver.source_prefix, '')
        self.assertEqual(self.app.resolver.source_suffix, '')
        self.assertEqual(self.app.resolver.default_format, None)
        self.assertEqual(self.app.resolver.head_resolvable, False)
        self.assertEqual(self.app.resolver.uri_resolvable, True)
        self.assertEqual(self.app.resolver.user, None)
        self.assertEqual(self.app.resolver.pw, None)

        # Finally with the test config for now....
        config = {
            'cache_root': self.SRC_IMAGE_CACHE,
            'source_prefix': 'http://sample.sample/',
            'source_suffix': '',
            'head_resolvable': True,
            'uri_resolvable': True
        }

        self.app.resolver = SimpleHTTPResolver(config)
        self.assertEqual(self.app.resolver.source_prefix,
                         'http://sample.sample/')
        self.assertEqual(self.app.resolver.source_suffix, '')
        self.assertEqual(self.app.resolver.default_format, None)
        self.assertEqual(self.app.resolver.head_resolvable, True)
        self.assertEqual(self.app.resolver.uri_resolvable, True)

        #Test with identifier only
        # The cache path below is the identifier's hash split into 3-char
        # directory components — presumably an md5/sha1 hex digest; TODO
        # confirm against SimpleHTTPResolver's hashing scheme.
        ident = '0001'
        expected_path = join(self.app.resolver.cache_root, '25')
        expected_path = join(expected_path, 'bbd')
        expected_path = join(expected_path, 'cd0')
        expected_path = join(expected_path, '6c3')
        expected_path = join(expected_path, '2d4')
        expected_path = join(expected_path, '77f')
        expected_path = join(expected_path, '7fa')
        expected_path = join(expected_path, '1c3')
        expected_path = join(expected_path, 'e4a')
        expected_path = join(expected_path, '91b')
        expected_path = join(expected_path, '032')
        expected_path = join(expected_path, 'loris_cache.tif')

        ii = self.app.resolver.resolve(self.app, ident, "")
        self.assertEqual(expected_path, ii.src_img_fp)
        self.assertEqual(ii.src_format, 'tif')
        self.assertTrue(isfile(ii.src_img_fp))

        #Test with a full uri
        #Note: This seems weird but idents resolve wrong and removes a slash from //
        ident = quote_plus('http://sample.sample/0001')
        expected_path = join(self.app.resolver.cache_root, 'http')
        expected_path = join(expected_path, '32')
        expected_path = join(expected_path, '3a5')
        expected_path = join(expected_path, '353')
        expected_path = join(expected_path, '8f5')
        expected_path = join(expected_path, '0de')
        expected_path = join(expected_path, '1d3')
        expected_path = join(expected_path, '011')
        expected_path = join(expected_path, '675')
        expected_path = join(expected_path, 'ebc')
        expected_path = join(expected_path, 'c75')
        expected_path = join(expected_path, '083')
        expected_path = join(expected_path, 'loris_cache.tif')

        # The cache entry must not exist before the resolve call creates it.
        self.assertFalse(exists(expected_path))
        ii = self.app.resolver.resolve(self.app, ident, "")
        self.assertEqual(expected_path, ii.src_img_fp)
        self.assertEqual(ii.src_format, 'tif')
        self.assertTrue(isfile(ii.src_img_fp))

        #Test with a bad identifier
        ident = 'DOESNOTEXIST'
        self.assertRaises(
            ResolverException,
            lambda: self.app.resolver.resolve(self.app, ident, ""))

        #Test with a bad url
        ident = quote_plus('http://sample.sample/DOESNOTEXIST')
        self.assertRaises(
            ResolverException,
            lambda: self.app.resolver.resolve(self.app, ident, ""))

        #Test with no content-type or extension or default format
        ident = '0002'
        self.assertRaises(
            ResolverException,
            lambda: self.app.resolver.resolve(self.app, ident, ""))

        #Test with invalid content-type
        ident = '0003'
        self.assertRaises(
            ResolverException,
            lambda: self.app.resolver.resolve(self.app, ident, ""))
Ejemplo n.º 44
0
 def urlencode(data):
     """URL-encode *data*, encoding spaces as '+' (quote_plus semantics)."""
     return quote_plus(data)
Ejemplo n.º 45
0
def main():
    """CLI entry point: search stackoverflow.com for a question and print
    the top result's question text plus its best answer.

    Scrapes the live site with requests + BeautifulSoup; QUESTION, CLEAR,
    LINK, CYAN and parse_so_text are defined elsewhere in this file
    (presumably ANSI color constants and an HTML-to-text helper — confirm).
    """

    parser = argparse.ArgumentParser(
        prog="so-search", description="Search stackoverflow.com for a question."
    )
    parser.add_argument(
        "question", metavar="Question", type=str, help="question to be searched"
    )

    args = vars(parser.parse_args())
    search_string = args["question"]

    # Run the search and parse the results page.
    query_string = p.quote_plus(search_string)
    search_r = requests.get("https://stackoverflow.com/search?q=%s" % query_string)
    search_soup = bs4.BeautifulSoup(search_r.text, "html.parser")

    # Take only the first search result.
    surrounding_div = search_soup.find_all("div", "question-summary search-result")[0]
    assert surrounding_div != None
    votes = surrounding_div.find("span").find("strong").text
    # Accepted-answer count is only present when an answer was accepted.
    try:
        answers = (
            surrounding_div.find("div", "status answered-accepted").find("strong").text
        )
    except AttributeError:
        answers = None
    tag = search_soup.find_all("a", "question-hyperlink")[0]
    print(QUESTION + "QUESTION: " + CLEAR + tag["title"])

    # Resolve the relative question link against the search URL.
    question_url = p.urljoin(
        "https://stackoverflow.com/search?q=%s" % query_string, tag["href"]
    )
    print(LINK + "(%s)" % question_url + CLEAR)
    answer_r = requests.get(question_url)
    answer_soup = bs4.BeautifulSoup(answer_r.text, "html.parser")
    # Fall back to the answer count shown on the question page itself.
    if not answers:
        answers = answer_soup.find("div", "subheader answers-subheader").find("h2")[
            "data-answercount"
        ]

    print("(%s Answers, %s Votes)\n" % (answers, votes))

    question_block = answer_soup.find("div", "question").find("div", "post-text")
    question_text = parse_so_text(question_block)

    # Prefer the accepted answer; otherwise the first answer; otherwise
    # print the question and bail out.
    try:
        answer_block = answer_soup.find_all("div", "answer accepted-answer")[0]
    except:
        try:
            answer_block = answer_soup.find_all("div", "answer")[0]
        except:
            print()
            print(question_text)
            print()
            print(CYAN + 'NO ANSWERS' + CLEAR)
            return
    answer_votes = answer_block.find("div", {"itemprop": "upvoteCount"})["data-value"]
    answer_text = parse_so_text(answer_block.find("div", "post-text"))

    print()
    print(question_text)
    print()
    print(CYAN + "ANSWER: %s UPVOTES" % answer_votes + CLEAR)
    print()
    print(answer_text)
    print()
Ejemplo n.º 46
0
from flask import Flask,render_template,request
from flask_pymongo import PyMongo
from urllib import parse
import os
from PIL import Image


app = Flask(__name__)

# Percent-encode the password so special characters (here '!') survive
# inside a MongoDB connection URI.
# SECURITY NOTE(review): credential is hardcoded in source — move it to an
# environment variable or config file.
passwd = parse.quote_plus("MongoDBmima12!")
app.config['MONGO_DBNAME'] = 'test'
# NOTE(review): the URI template is an empty string, so .format(passwd)
# yields '' and PyMongo receives no usable MONGO_URI — the real template
# was probably stripped from this paste; restore it before running.
app.config['MONGO_URI'] = "".format(passwd)
mongo = PyMongo(app)
# Collection handles; note 'file' shadows the builtin of the same name.
user = mongo.db.user
file = mongo.db.file

@app.route('/',methods=['POST','GET'])
def index():
    """Log a user in (POST) or render the login page (GET).

    On a successful credential match, renders card.html with the user's
    display name and the full commodity listing.
    """
    if request.method == 'POST':
        log_in_username = request.form.get('studentID')
        log_in_passwd = request.form.get('passwd')
        # Run the Mongo query once; the original executed the identical
        # find() three times per request just to re-read the same result.
        matches = list(user.find({'studentID': log_in_username}))
        # SECURITY NOTE(review): passwords are compared in plaintext —
        # these should be hashed (e.g. werkzeug.security) before storage.
        if matches and matches[-1]['passwd'] == log_in_passwd:
            return render_template("card.html",
                                   username=matches[-1]['username'],
                                   commodity=file.find())
    return render_template("index.html")
    
@app.route('/test',methods=['POST','GET'])
def login():
    """Registration handler stub.

    NOTE(review): this handler appears truncated in the paste — it reads
    the form fields but never stores them or returns a response; confirm
    against the full source before relying on it.
    """
    if request.method == 'POST':
        registered_username = request.form.get('name')
        registered_sex = request.form.get('sex')
        registered_studentID = request.form.get('studentID')
Ejemplo n.º 47
0
# Help-text fragments for the Telegram build-bot commands.  The greeting
# strings below also reference hereyago, username, build_help, changelog_help,
# sync_help and clean_help, which are defined earlier in this file (not
# visible here) — confirm they exist before reordering.
repopick_a_help = "/repopick to set repopick on or off\n"
reset_help = "/reset to set reset on or off\n"
repopick_b_help = "-- /repopick `changes` to pick from gerrit on build\n"
open_a_help = "/open to see all open changes\n"
open_b_help = "-- /open `projects` to see open changes for certain projects\n"
pickopen_help = "/pickopen to pick all open changes on gerrit\n"
help_help = "/help to see this message\n--/help 'command' to see information about that command :)" # love this lmao help_help

# Four greeting variants: (jenkins available?) x (sender is master?).
jenkinsnotmaster = "Sup *not* master. \n" + hereyago + open_a_help + open_b_help + help_help
nojenkinsnotmaster = "Sup *not* master. \n" + hereyago + open_a_help + open_b_help + help_help
jenkinsmaster = "Sup" + username + "\n" + hereyago + build_help + changelog_help + sync_help + clean_help + repopick_a_help + reset_help + repopick_b_help + pickopen_help + open_a_help + open_b_help + help_help
nojenkinsmaster = "Sup" + username + "\n" + hereyago + open_a_help + open_b_help + help_help


# Default build parameters; cg is the changelog credit line, URL-encoded
# because it is passed through a query string later.
cg = "This is an automated build, provided by @BruhhJenkinsBot."
cg = quote_plus(cg)
syncparam = "true"
cleanparam = "false"
repopickstatus = "false"
rpick = ""  
reporeset = "false"
release = "false"
def get_admin_ids(bot, chat_id):
    """Return the user IDs of all administrators of the given chat."""
    administrators = bot.getChatAdministrators(chat_id)
    return [member.user.id for member in administrators]

def start(bot, update):
    """Telegram /start handler: show a typing action in private chats.

    NOTE(review): the body appears truncated in this paste — only the
    typing action is visible; confirm against the full source.
    """
    if update.message.chat.type == "private":

        bot.sendChatAction(chat_id=update.message.chat_id,
                           action=ChatAction.TYPING)
Ejemplo n.º 48
0
 def encode(self, identifier):  # pylint: disable=no-self-use
     """Make *identifier* URL-safe by percent-encoding unsafe characters."""
     safe_identifier = quote_plus(identifier)
     return safe_identifier
Ejemplo n.º 49
0
from bs4 import BeautifulSoup
import urllib.request as req
import urllib.parse as rep
import sys
import io
import os
import errno  # was missing: referenced in the OSError handler below

# Force UTF-8 on stdout/stderr so the Korean strings print on any console.
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')

# Install a browser-like User-Agent for every urllib request.
opener = req.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
req.install_opener(opener)

base = "https://www.inflearn.com/"
quote = rep.quote_plus("추천-강좌")  # percent-encode the Korean path segment
url = base + quote
# Fetch once and reuse the response; the original opened the same URL
# twice (once for the body, once for the charset header).
response = req.urlopen(url)
res = response.read().decode(response.headers.get_content_charset())
savepath = "C:\\imagedown\\"  #C:/imagedown/
try:
    if not (os.path.isdir(savepath)):
        os.makedirs(os.path.join(savepath))
except OSError as e:
    # Only tolerate "directory already exists"; re-raise anything else.
    # (The original referenced errno without importing it, so this branch
    # raised NameError instead of the intended check.)
    if e.errno != errno.EEXIST:
        print("폴더 만들기 실패!")
        raise

soup = BeautifulSoup(res, "html.parser")

# Second "ul.slides" list on the page holds the course thumbnails.
img_list1 = soup.select("ul.slides")[1]
Ejemplo n.º 50
0
def search_kobo(query, max_results=10, timeout=60, write_html_to=None):
    """Scrape a Kobo store search and yield populated SearchResult objects.

    :param query:         search terms (plus-quoted into the URL)
    :param max_results:   stop after this many result items
    :param timeout:       network timeout in seconds for the page fetch
    :param write_html_to: optional path; the raw response HTML is written
                          there for debugging
    """
    from css_selectors import Select
    url = 'https://www.kobobooks.com/search/search.html?q=' + quote_plus(query)

    br = get_browser()

    with closing(br.open(url, timeout=timeout)) as f:
        raw = f.read()
        if write_html_to is not None:
            # Debug aid: dump the page exactly as received.
            with open(write_html_to, 'wb') as f:
                f.write(raw)
        doc = html.fromstring(raw)
        select = Select(doc)
        for i, item in enumerate(select('.result-items .item-wrapper.book')):
            if i == max_results:
                break
            # Cover image: first match wins; protocol-relative URLs get
            # an explicit https scheme.  (for/else: else runs on no match.)
            for img in select('.item-image img[src]', item):
                cover_url = img.get('src')
                if cover_url.startswith('//'):
                    cover_url = 'https:' + cover_url
                break
            else:
                cover_url = None

            # Title text and detail URL from the first h2.title / its anchor.
            for p in select('h2.title', item):
                title = etree.tostring(p, method='text',
                                       encoding='unicode').strip()
                for a in select('a[href]', p):
                    url = a.get('href')
                    break
                else:
                    url = None
                break
            else:
                title = None
            # Append the subtitle, if the item has one.
            if title:
                for p in select('p.subtitle', item):
                    title += ' - ' + etree.tostring(
                        p, method='text', encoding='unicode').strip()

            authors = []
            for a in select('.contributors a.contributor-name', item):
                authors.append(
                    etree.tostring(a, method='text',
                                   encoding='unicode').strip())
            authors = authors_to_string(authors)

            for p in select('p.price', item):
                price = etree.tostring(p, method='text',
                                       encoding='unicode').strip()
                break
            else:
                price = None

            # Only yield complete entries; format and DRM are fixed defaults.
            if title and authors and url:
                s = SearchResult()
                s.cover_url = cover_url
                s.title = title
                s.author = authors
                s.price = price
                s.detail_item = url
                s.formats = 'EPUB'
                s.drm = SearchResult.DRM_UNKNOWN

                yield s
Ejemplo n.º 51
0
def get_aur_info(package_names: Sequence[str],
                 search: bool = False,
                 by_name: bool = False) -> List[Dict]:
    """
    Fetches AUR infos for package_names via AurJson.
    https://wiki.archlinux.org/index.php/AurJson

    :param package_names:   The names of the packages in a sequence
    :param search:          True if one wants to search instead of getting info
    :param by_name:         If one wants to search by name only
    :return:                A list containing the "results" values of the RPC answer.
    :raises ConnectionProblem: on any URLError while querying the RPC
    :raises InvalidInput:      if the RPC response is not valid JSON
    """

    # Keep each request URL under this many bytes; long package lists are
    # split into several RPC calls accordingly.
    max_query_length = 8000
    if not search:
        # info queries take repeated &arg[]= parameters
        query_url = AurVars.aur_domain + "/rpc/?v=5&type=info"
        query_prefix = "&arg[]="
    else:
        # search queries take a single &arg= parameter
        query_url = AurVars.aur_domain + "/rpc/?v=5&type=search"
        if by_name:
            query_url += "&by=name"
        query_prefix = "&arg="
    # Lengths measured in UTF-8 bytes, matching the on-the-wire size.
    query_url_length = len(query_url.encode("utf8"))
    query_prefix_length = len(query_prefix.encode("utf8"))

    # quote_plus needed for packages like libc++
    package_names = [
        quote_plus(package_name) for package_name in package_names
    ]

    # Partition the names so every assembled URL fits max_query_length.
    queries_parameters = split_query_helper(max_query_length, query_url_length,
                                            query_prefix_length, package_names)

    results_list = []
    for query_parameters in queries_parameters:
        try:
            url = "{}{}".format(
                query_url, ''.join([
                    "{}{}".format(query_prefix, parameter)
                    for parameter in query_parameters
                ]))
            with urlopen(url, timeout=AurVars.aur_timeout) as response:
                results_list.extend(json.loads(response.read())['results'])
        except URLError:
            logging.error(
                "Connection problem while requesting AUR info for {}".format(
                    package_names),
                exc_info=True)
            raise ConnectionProblem(
                "Connection problem while requesting AUR info for {}".format(
                    package_names))
        except json.JSONDecodeError:
            logging.error(
                "Decoding problem while requesting AUR info for {}".format(
                    package_names),
                exc_info=True)
            raise InvalidInput(
                "Decoding problem while requesting AUR info for {}".format(
                    package_names))

    return results_list
Ejemplo n.º 52
0
    def _qqai_post(self, url, params):
        """POST *params* to a QQ AI endpoint over raw TLS (MicroPython).

        Sends the body with chunked transfer encoding: plain values become
        urlencoded key=value chunks, while file-like values are streamed
        from the '.qqai_base64' temp file in 3 KB pieces.  Returns the open
        socket positioned just past the response headers, so the caller
        reads the body itself.

        NOTE(review): the request line says HTTP/1.0 while chunked encoding
        is an HTTP/1.1 feature — the QQ endpoint apparently accepts it, but
        confirm before changing either side.
        """
        gc.collect()  # free RAM before allocating socket/TLS buffers
        port = 443
        proto, dummy, host, path = url.split("/", 3)
        ai = usocket.getaddrinfo(host, port)
        # print(ai)
        addr = ai[0][4]
        s = usocket.socket()
        s.connect(addr)
        s = ussl.wrap_socket(s)
        # Request line and headers.
        s.write(b"%s /%s HTTP/1.0\r\n" % ('POST', path))
        s.write(b"Host: %s\r\n" % host)
        s.write(b"Connection: keep-alive\r\n")
        s.write(b"Content-Type: application/x-www-form-urlencoded\r\n")
        s.write(b"Transfer-Encoding: chunked\r\n")

        s.write(b"\r\n")
        temp_str = ''
        list_key = list(params.keys())
        for k in list_key:
            if not hasattr(params[k], 'read'):
                # Simple value: emit one chunk "key=value[&]".
                # ('is not' works here because k comes from list_key itself.)
                temp_str = k + '=' + parse.quote_plus(str(params[k]), safe='')
                if k is not list_key[-1]:
                    temp_str = temp_str + '&'

                # Chunk framing: hex size, CRLF, payload, CRLF.
                chunk_size = hex(len(temp_str))[2:]
                s.write(chunk_size.encode())
                # print(chunk_size,end='')
                s.write(b'\r\n')
                # print()
                s.write(temp_str.encode())
                # print(temp_str,end='')
                s.write(b'\r\n')
                # print()

            else:
                # File-like value: send "key=" then stream the base64 temp
                # file in 3 KB reads, quoting each piece as its own chunk.
                temp_str = k + '='
                chunk_size = hex(len(temp_str))[2:]
                s.write(chunk_size.encode())
                s.write(b'\r\n')
                # print(chunk_size)
                s.write(temp_str.encode())
                s.write(b'\r\n')
                # print(temp_str)
                with open('.qqai_base64', 'r') as file_base64:
                    while True:
                        temp_str = file_base64.read(1024 * 3)
                        if temp_str:
                            temp_str = parse.quote_plus(temp_str, safe='')
                            chunk_size = hex(len(temp_str))[2:]
                            s.write((chunk_size + '\r\n').encode())
                            # print(chunk_size)
                            s.write((temp_str + '\r\n').encode())
                            # print(temp_str)
                        else:
                            break
                # Separator '&' as a one-byte chunk between parameters.
                if k is not list_key[-1]:
                    s.write(b'1\r\n')
                    # print('1')
                    s.write(b'&\r\n')
                    # print('&')
        # chunked end
        s.write(b'0\r\n')
        # print('0')
        s.write(b'\r\n')
        # print('')

        # Read the status line, then skip headers up to the blank line.
        l = s.readline()
        protover, status, msg = l.split(None, 2)
        # print(protover, status, msg)
        status = int(status)
        while True:
            l = s.readline()
            # print(l)
            if not l or l == b"\r\n":
                break
        return s
Ejemplo n.º 53
0
def _voicemail(call_sid):
    """Route *call_sid* to a twimlets voicemail with an all-agents-busy message."""
    msg = 'Sorry, All agents are busy. Please leave a message. We will call you as soon as possible'
    voicemail_url = ('http://twimlets.com/voicemail?Email=' + EMAIL +
                     '&Message=' + quote_plus(msg))
    route_call(call_sid, voicemail_url)
Ejemplo n.º 54
0
async def carbon_api(e):
    """A wrapper for carbon.now.sh.

    Takes the code from the command argument (text after the 8-char prefix)
    or from the replied-to message, renders it with headless Chrome on
    carbon.now.sh, and uploads the resulting carbon.png back to the chat.
    """
    await e.edit("`Processing...`")
    CARBON = 'https://carbon.now.sh/?l={lang}&code={code}'
    global CARBONLANG
    textx = await e.get_reply_message()
    pcode = e.text
    if pcode[8:]:
        pcode = str(pcode[8:])
    elif textx:
        pcode = str(textx.message)  # Importing message to module
    code = quote_plus(pcode)  # Converting to urlencoded
    await e.edit("`Processing..\n25%`")
    # Remove any stale download so the wait loop below detects the new file.
    if os.path.isfile(TEMP_DOWNLOAD_DIRECTORY + "/carbon.png"):
        os.remove(TEMP_DOWNLOAD_DIRECTORY + "/carbon.png")
    url = CARBON.format(code=code, lang=CARBONLANG)
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    chrome_options.binary_location = GOOGLE_CHROME_BIN
    chrome_options.add_argument("--window-size=1920x1080")
    chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-gpu")
    prefs = {'download.default_directory': TEMP_DOWNLOAD_DIRECTORY}
    chrome_options.add_experimental_option('prefs', prefs)
    driver = webdriver.Chrome(executable_path=CHROME_DRIVER,
                              options=chrome_options)
    driver.get(url)
    await e.edit("`Processing..\n50%`")
    # Headless Chrome blocks downloads by default; allow them into our
    # temp directory via the DevTools Page.setDownloadBehavior command.
    driver.command_executor._commands["send_command"] = (
        "POST", '/session/$sessionId/chromium/send_command')
    params = {
        'cmd': 'Page.setDownloadBehavior',
        'params': {
            'behavior': 'allow',
            'downloadPath': TEMP_DOWNLOAD_DIRECTORY
        }
    }
    driver.execute("send_command", params)
    driver.find_element_by_xpath("//button[contains(text(),'Export')]").click()
    # driver.find_element_by_xpath("//button[contains(text(),'4x')]").click()
    # driver.find_element_by_xpath("//button[contains(text(),'PNG')]").click()
    await e.edit("`Processing..\n75%`")
    # Waiting for downloading
    # NOTE(review): no timeout here - hangs forever if the export fails.
    while not os.path.isfile(TEMP_DOWNLOAD_DIRECTORY + "/carbon.png"):
        await sleep(0.5)
    await e.edit("`Processing..\n100%`")
    carbon_file = TEMP_DOWNLOAD_DIRECTORY + "/carbon.png"
    await e.edit("`Uploading..`")
    await e.client.send_file(
        e.chat_id,
        carbon_file,  # BUG FIX: was `file_path`, an undefined name (NameError)
        caption=("Made using [Carbon](https://carbon.now.sh/about/),"
                 "\na project by [Dawn Labs](https://dawnlabs.io/)"),
        force_document=True,
        reply_to=e.message.reply_to_msg_id,
    )

    # Removing carbon.png after uploading, then shut the browser down.
    os.remove(carbon_file)
    driver.quit()
    await e.delete()  # Deleting msg
    def get_pvr_artwork(self,
                        title,
                        channel,
                        genre="",
                        manual_select=False,
                        ignore_cache=False):
        """
            collect full metadata and artwork for pvr entries
            parameters: title (required)
            channel: channel name (required)
            genre: (optional)
            manual_select: prompt the user to confirm/adjust lookups (optional)
            ignore_cache: skip the cached result and do a fresh lookup (optional)
            the more optional parameters are supplied, the better the search results
        """
        details = {"art": {}}
        # try cache first

        # use searchtitle when searching cache
        cache_title = title.lower()
        cache_channel = channel.lower()
        searchtitle = self.get_searchtitle(cache_title, cache_channel)
        # original cache_str assignment cache_str = "pvr_artwork.%s.%s" % (title.lower(), channel.lower())
        cache_str = "pvr_artwork.%s.%s" % (searchtitle, channel.lower())
        cache = self._mutils.cache.get(cache_str)
        if cache and not manual_select and not ignore_cache:
            log_msg("get_pvr_artwork - return data from cache - %s" %
                    cache_str)
            details = cache
        else:
            # no cache - start our lookup adventure
            log_msg("get_pvr_artwork - no data in cache - start lookup - %s" %
                    cache_str)

            # workaround for recordings
            # if this is a local recording, prefer its genre/channel metadata
            recordingdetails = self.lookup_local_recording(title, channel)
            if recordingdetails and not (channel and genre):
                genre = recordingdetails["genre"]
                channel = recordingdetails["channel"]

            # seed the result dict with the raw pvr info
            details["pvrtitle"] = title
            details["pvrchannel"] = channel
            details["pvrgenre"] = genre
            details["cachestr"] = cache_str
            details["media_type"] = ""
            details["art"] = {}

            # filter genre unknown/other
            # (localized string 19499 holds Kodi's "unknown" genre labels)
            if not genre or genre.split(" / ")[0] in xbmc.getLocalizedString(
                    19499).split(" / "):
                details["genre"] = []
                genre = ""
                log_msg("genre is unknown so ignore....")
            else:
                details["genre"] = genre.split(" / ")
                details["media_type"] = self.get_mediatype_from_genre(genre)
            searchtitle = self.get_searchtitle(title, channel)

            # only continue if we pass our basic checks
            filterstr = self.pvr_proceed_lookup(title, channel, genre,
                                                recordingdetails)
            proceed_lookup = False if filterstr else True
            if not proceed_lookup and manual_select:
                # warn user about active skip filter
                proceed_lookup = xbmcgui.Dialog().yesno(
                    line1=self._mutils.addon.getLocalizedString(32027),
                    heading=xbmc.getLocalizedString(750))

            if proceed_lookup:

                # if manual lookup get the title from the user
                if manual_select:
                    # py2 returns bytes from Dialog().input, py3 returns str
                    if sys.version_info.major == 3:
                        searchtitle = xbmcgui.Dialog().input(
                            xbmc.getLocalizedString(16017),
                            searchtitle,
                            type=xbmcgui.INPUT_ALPHANUM)
                    else:
                        searchtitle = xbmcgui.Dialog().input(
                            xbmc.getLocalizedString(16017),
                            searchtitle,
                            type=xbmcgui.INPUT_ALPHANUM).decode("utf-8")
                    if not searchtitle:
                        return

                # if manual lookup and no mediatype, ask the user
                if manual_select and not details["media_type"]:
                    yesbtn = self._mutils.addon.getLocalizedString(32042)
                    nobtn = self._mutils.addon.getLocalizedString(32043)
                    header = self._mutils.addon.getLocalizedString(32041)
                    if xbmcgui.Dialog().yesno(header,
                                              header,
                                              yeslabel=yesbtn,
                                              nolabel=nobtn):
                        details["media_type"] = "movie"
                    else:
                        details["media_type"] = "tvshow"

                # append thumb from recordingdetails
                if recordingdetails and recordingdetails.get("thumbnail"):
                    details["art"]["thumb"] = recordingdetails["thumbnail"]
                # lookup custom path
                details = extend_dict(
                    details, self.lookup_custom_path(searchtitle, title))
                # lookup movie/tv library
                details = extend_dict(
                    details,
                    self.lookup_local_library(searchtitle,
                                              details["media_type"]))

                # do internet scraping if enabled
                if self._mutils.addon.getSetting("pvr_art_scraper") == "true":

                    log_msg(
                        "pvrart start scraping metadata for title: %s - media_type: %s"
                        % (searchtitle, details["media_type"]))

                    # prefer tmdb scraper
                    tmdb_result = self._mutils.get_tmdb_details(
                        "",
                        "",
                        searchtitle,
                        "",
                        "",
                        details["media_type"],
                        manual_select=manual_select,
                        ignore_cache=manual_select)
                    log_msg("pvrart lookup for title: %s - TMDB result: %s" %
                            (searchtitle, tmdb_result))
                    if tmdb_result:
                        details["media_type"] = tmdb_result["media_type"]
                        details = extend_dict(details, tmdb_result)

                    # fallback to tvdb scraper
                    # following 3 lines added as part of "auto refresh" fix. ensure manual_select=true for TVDB lookup. No idea why this works
                    tempmanualselect = manual_select
                    manual_select = "true"
                    log_msg(
                        "DEBUG INFO: TVDB lookup: searchtitle: %s channel: %s manual_select: %s"
                        % (searchtitle, channel, manual_select))
                    # TVDB is consulted when TMDB gave nothing/no art, or for tv shows
                    if (not tmdb_result
                            or (tmdb_result and not tmdb_result.get("art"))
                            or details["media_type"] == "tvshow"):
                        # original code: tvdb_match = self.lookup_tvdb(searchtitle, channel, manual_select=manual_select). part of "auto refresh" fix.
                        tvdb_match = self.lookup_tvdb(
                            searchtitle,
                            channel,
                            manual_select=manual_select,
                            tempmanualselect=tempmanualselect)
                        log_msg(
                            "pvrart lookup for title: %s - TVDB result: %s" %
                            (searchtitle, tvdb_match))
                        if tvdb_match:
                            # get full tvdb results and extend with tmdb
                            if not details["media_type"]:
                                details["media_type"] = "tvshow"
                            details = extend_dict(
                                details,
                                self._mutils.thetvdb.get_series(tvdb_match))
                            details = extend_dict(
                                details,
                                self._mutils.tmdb.
                                get_videodetails_by_externalid(
                                    tvdb_match,
                                    "tvdb_id"), ["poster", "fanart"])
                    # part of "auto refresh" fix - revert manual_select to original value
                    manual_select = tempmanualselect
                    # fanart.tv scraping - append result to existing art
                    if details.get(
                            "imdbnumber") and details["media_type"] == "movie":
                        details["art"] = extend_dict(
                            details["art"],
                            self._mutils.fanarttv.movie(details["imdbnumber"]),
                            ["poster", "fanart", "landscape"])
                    elif details.get(
                            "tvdb_id") and details["media_type"] == "tvshow":
                        details["art"] = extend_dict(
                            details["art"],
                            self._mutils.fanarttv.tvshow(details["tvdb_id"]),
                            ["poster", "fanart", "landscape"])

                    # append omdb details
                    if details.get("imdbnumber"):
                        details = extend_dict(
                            details,
                            self._mutils.omdb.get_details_by_imdbid(
                                details["imdbnumber"]), ["rating", "votes"])

                    # set thumbnail - prefer scrapers
                    thumb = ""
                    if details.get("thumbnail"):
                        thumb = details["thumbnail"]
                    elif details["art"].get("landscape"):
                        thumb = details["art"]["landscape"]
                    elif details["art"].get("fanart"):
                        thumb = details["art"]["fanart"]
                    elif details["art"].get("poster"):
                        thumb = details["art"]["poster"]
                    # use google images as last-resort fallback for thumbs - if enabled
                    elif self._mutils.addon.getSetting(
                            "pvr_art_google") == "true":
                        if manual_select:
                            google_title = searchtitle
                        else:
                            google_title = '%s %s' % (searchtitle, "imdb")
                        thumb = self._mutils.google.search_image(
                            google_title, manual_select)
                    if thumb:
                        details["thumbnail"] = thumb
                        details["art"]["thumb"] = thumb
                    # extrafanart
                    # expose numbered fanart.N entries plus an extrafanart plugin path
                    if details["art"].get("fanarts"):
                        for count, item in enumerate(
                                details["art"]["fanarts"]):
                            details["art"]["fanart.%s" % count] = item
                        if not details["art"].get("extrafanart") and len(
                                details["art"]["fanarts"]) > 1:
                            details["art"]["extrafanart"] = "plugin://script.skin.helper.service/"\
                                "?action=extrafanart&fanarts=%s" % quote_plus(repr(details["art"]["fanarts"]))

                    # download artwork to custom folder
                    if self._mutils.addon.getSetting(
                            "pvr_art_download") == "true":
                        details["art"] = download_artwork(
                            self.get_custom_path(searchtitle, title),
                            details["art"])

            log_msg("pvrart lookup for title: %s - final result: %s" %
                    (searchtitle, details))

        # always store result in cache
        # manual lookups should not expire too often
        # NOTE(review): both branches below are identical (365 days); per the
        # comment above, the else-branch was presumably meant to use a shorter
        # expiration - confirm intended value before collapsing the if/else.
        if manual_select:
            self._mutils.cache.set(cache_str,
                                   details,
                                   expiration=timedelta(days=365))
        else:
            self._mutils.cache.set(cache_str,
                                   details,
                                   expiration=timedelta(days=365))
        return details
Ejemplo n.º 56
0
    def get_connection_to_athena(
        self,
        DbName: str,
        region_name: Optional[str] = None,
        S3QueryResultsLocation: Optional[str] = None,
    ) -> Dict[str, Union[str, sa.engine.Engine]]:
        """
        Connect Athena to an existing database

        Parameters
        ----------
        DbName : str
            Name of the glue database name.

        region_name : str, optional
            The region to connect to athena. The default region will be used if receives None.

        S3QueryResultsLocation : str, optional
            The s3 bucket where to store query results. A default location in the
            workspace scratch bucket will be used if receives None.


        Returns
        -------
        db_url : str
            A sql alchemy connection string.
        engine : sqlalchemy.engine.Engine
            A sql alchemy engine.

        Example
        --------
        >>> from aws.utils.notebooks.database import AthenaUtils
        >>> from sqlalchemy.engine import create_engine
        >>> from aws.utils.notebooks.common import get_workspace
        >>> (db_url,engine) = AthenaUtils.get_connection_to_athena(
        ...     DbName = glue_db,
        ...     region_name = my_region,
        ...     S3QueryResultsLocation = results_location)
        """

        workspace = get_workspace()
        # Fall back to workspace defaults for anything not supplied by caller.
        # (`is None` rather than `== None` - identity check for the singleton.)
        if region_name is None:
            region_name = workspace["region"]

        if S3QueryResultsLocation is None:
            S3QueryResultsLocation = f"s3://{workspace['ScratchBucket']}/{workspace['team_space']}/athena"

        template_con_str = (
            "awsathena+rest://athena.{region_name}.amazonaws.com:443/"
            "{schema_name}?s3_staging_dir={s3_staging_dir}")
        conn_str = template_con_str.format(
            region_name=region_name,
            schema_name=DbName,
            # The staging dir lives in a query string, so it must be URL-encoded.
            s3_staging_dir=quote_plus(S3QueryResultsLocation),
        )

        engine = create_engine(conn_str)
        # Cache the connection details on the instance for later reuse.
        self.db_url = conn_str
        self.current_engine = engine
        self.db_class = "athena"
        return {
            "db_url": self.db_url,
            "engine": self.current_engine,
        }
Ejemplo n.º 57
0
    def get_su(self):
        """Return the user name URL-quoted and then base64-encoded (utf-8)."""
        quoted_name = quote_plus(self.user)
        encoded = base64.b64encode(quoted_name.encode("utf-8"))
        return encoded.decode("utf-8")
Ejemplo n.º 58
0
    def _obtain_token(  # The verb "obtain" is influenced by OAUTH2 RFC 6749
        self,
        grant_type,
        params=None,  # a dict to be sent as query string to the endpoint
        data=None,  # All relevant data, which will go into the http body
        headers=None,  # a dict to be sent as request headers
        post=None,  # A callable to replace requests.post(), for testing.
        # Such as: lambda url, **kwargs:
        #   Mock(status_code=200, text='{}')
        **kwargs  # Relay all extra parameters to underlying requests
    ):  # Returns the json object came from the OAUTH2 response
        """POST a token request to the configured token_endpoint.

        Builds the request body from (in increasing precedence) the client
        defaults, ``self.default_body`` and the ``data`` parameter, then sends
        it and returns the decoded JSON response. Per RFC 6749 section 5.2
        even an OAuth2 *error* response is a valid JSON structure, so errors
        are returned to the caller rather than raised (except for HTTP 5xx
        and non-JSON bodies, which raise).

        Raises ValueError when the configuration lacks "token_endpoint".
        """
        _data = {'client_id': self.client_id, 'grant_type': grant_type}

        if self.default_body.get(
                "client_assertion_type") and self.client_assertion:
            # See https://tools.ietf.org/html/rfc7521#section-4.2
            # Pick the encoder registered for this assertion type; fall back
            # to identity when none is registered.
            encoder = self.client_assertion_encoders.get(
                self.default_body["client_assertion_type"], lambda a: a)
            _data["client_assertion"] = encoder(
                self.client_assertion()  # Do lazy on-the-fly computation
                if callable(self.client_assertion) else self.client_assertion)

        _data.update(self.default_body)  # It may contain authen parameters
        _data.update(data or {})  # So the content in data param prevails
        _data = {k: v for k, v in _data.items() if v}  # Clean up None values

        if _data.get('scope'):
            # NOTE(review): _stringify presumably normalizes an iterable scope
            # into the wire-format string - confirm against its definition.
            _data['scope'] = self._stringify(_data['scope'])

        _headers = {'Accept': 'application/json'}
        _headers.update(self.default_headers)
        _headers.update(headers or {})

        # Quoted from https://tools.ietf.org/html/rfc6749#section-2.3.1
        # Clients in possession of a client password MAY use the HTTP Basic
        # authentication.
        # Alternatively, (but NOT RECOMMENDED,)
        # the authorization server MAY support including the
        # client credentials in the request-body using the following
        # parameters: client_id, client_secret.
        if self.client_secret and self.client_id:
            _headers["Authorization"] = "Basic " + base64.b64encode(
                "{}:{}".format(
                    # Per https://tools.ietf.org/html/rfc6749#section-2.3.1
                    # client_id and client_secret needs to be encoded by
                    # "application/x-www-form-urlencoded"
                    # https://www.w3.org/TR/html401/interact/forms.html#h-17.13.4.1
                    # BEFORE they are fed into HTTP Basic Authentication
                    quote_plus(self.client_id),
                    quote_plus(
                        self.client_secret)).encode("ascii")).decode("ascii")

        if "token_endpoint" not in self.configuration:
            raise ValueError("token_endpoint not found in configuration")
        resp = (post or self._http_client.post)(
            self.configuration["token_endpoint"],
            headers=_headers,
            params=params,
            data=_data,
            **kwargs)
        if resp.status_code >= 500:
            resp.raise_for_status()  # TODO: Will probably retry here
        try:
            # The spec (https://tools.ietf.org/html/rfc6749#section-5.2) says
            # even an error response will be a valid json structure,
            # so we simply return it here, without needing to invent an exception.
            return json.loads(resp.text)
        except ValueError:
            self.logger.exception("Token response is not in json format: %s",
                                  resp.text)
            raise
Ejemplo n.º 59
0
    def _get_js_resources(self) -> iter:
        """Download and lazily yield all js resources referenced by the page,
        used for locating the various docids embedded in them.

        Yields (url, page_text) tuples from the per-name cache in
        ``self._jspages``; list-page fetching and parsing is guarded by
        ``self._jspages_locker``.
        """
        try:
            html = None
            with self._jspages_locker:
                if self._jspages_listpage is None:
                    # fetch the resource-list page
                    url = (
                        'https://www.facebook.com/ajax/bootloader-endpoint/?' +
                        'modules=NotificationList.react%2CNotificationJewelL' +
                        'ist.react%2CNotificationAsyncWrapper%2CNotification' +
                        'Store%2CNotificationJewelController%2CMercuryJewel%' +
                        '2CMercuryThreadInformer%2CMessengerState.bs%2CMesse' +
                        'ngerGraphQLThreadlistFetcher.bs%2CMercuryServerRequ' +
                        'ests%2CMercuryJewelUnreadCount.bs&' + '__user='******'&_a=1&' + '__req=' +
                        self._req.get_next() + '&__be=1&' +
                        '_pc=PHASED%3Aufi_home_page_pkg&dpr=1&' + '__rev=' +
                        parse.quote_plus(self._rev) + '&fb_dtsg_ag=' +
                        parse.quote_plus(self.fb_dtsg_ag) + '&jazoest=' +
                        self.jazoest + '&__spin_r=' +
                        parse.quote_plus(self._spin_r) + '&__spin_b=' +
                        parse.quote_plus(self._spin_b) + '&__spin_t=' +
                        parse.quote_plus(self._spin_t))

                    html = self._ha.getstring(url,
                                              headers="""
                            accept: */*
                            accept-encoding: gzip, deflate
                            accept-language: zh-CN,zh;q=0.9
                            referer: https://www.facebook.com/""")

                    if not isinstance(html, str) or html == "":
                        self._logger.error("Get docid js pages failed")
                        return

                    self._jspages_listpage = html

                if len(self._jspages_itemurls) < 1:
                    # parse the resource-list page into {name: url}
                    matches = self.re_js_resoures.findall(html)
                    if matches is None or not any(matches):
                        raise Exception("Get js resources failed")
                    for m in matches:
                        try:
                            if len(m) != 2:
                                continue
                            n = m[0]
                            u = m[1]
                            # strip JSON-escaped backslashes from the url
                            u = u.replace('\\', '')
                            if not self._jspages_itemurls.__contains__(n):
                                self._jspages_itemurls[n] = u
                        except Exception:
                            self._logger.trace(
                                "Get docid for contact parse item url error: {} {}"
                                .format(m, traceback.format_exc()))
                    self._logger.info(
                        "Got js resources list, {} count={}".format(
                            self.uname_str, len(self._jspages_itemurls)))

                # fbcookie = self._ha._managedCookie.get_cookie_for_domain(
                #     "https://www.facebook.com/")
                # self._ha._managedCookie.add_cookies(uridocid.netloc, fbcookie)
                # yield each resource, downloading only on first access
                for jsurl in self._jspages_itemurls.items():
                    try:
                        if self._jspages.__contains__(jsurl[0]):
                            yield self._jspages[jsurl[0]]
                        else:
                            jspage = self._ha.getstring(jsurl[1],
                                                        headers="""
                                    Origin: https://www.facebook.com
                                    Referer: https://www.facebook.com/""")

                            self._jspages[jsurl[0]] = (jsurl[1], jspage)
                            self._logger.debug("Got js resource: {} {}".format(
                                self.uname_str, jsurl[1]))
                            yield self._jspages[jsurl[0]]
                    except Exception:
                        self._logger.error(
                            "Download js resources error: {} {}".format(
                                self.uname_str, traceback.format_exc()))

        except Exception:
            self._logger.error("Get js resources error: {} {}".format(
                self.uname_str, traceback.format_exc()))
Ejemplo n.º 60
0
def string_connection(database):
    """Build and URL-quote an ODBC connection string for *database* on the
    corporate server, using the module-level credentials."""
    odbc_parts = ('DRIVER=' + driver +
                  ';SERVER=' + server_bd_corporativo +
                  ';DATABASE=' + database +
                  ';UID=' + username_bd_corporativo +
                  ';PWD=' + password_bd_corporativo)
    return quote_plus(odbc_parts)