Esempio n. 1
0
File: JFS.py Progetto: nuth/jottalib
    def escapeUrl(self, url):
        """Percent-encode the path portion of a JFS url, leaving any known
        query-string suffix (e.g. '?dl=true') untouched.

        Args:
            url: the url to escape; unicode text is utf-8 encoded first so
                 that quote() operates on a native (byte) string on Py2.

        Returns:
            The url with its path percent-encoded; characters appearing in
            ``self.rootpath`` are treated as safe and left as-is.
        """
        # BUG FIX: the original tested `isinstance(url, unicode)`, which
        # raises NameError on Python 3.  Testing against `str` keeps the
        # Python 2 behavior (unicode -> utf-8 bytes) and is a no-op on
        # Python 3, where quote() accepts str directly.
        if not isinstance(url, str):
            url = url.encode('utf-8')  # urls have to be bytestrings (Py2)
        # Known query suffixes that must not be percent-encoded.
        separators = [
            '?dl=true',
            '?mkDir=true',
            '?dlDir=true',
            '?mvDir=',
            '?mv=',
            '?mode=list',
            '?mode=bin',
            '?mode=thumb&ts='
        ]
        # Pick the first suffix actually present.  Defaulting to the first
        # entry is harmless when none match: rsplit then yields one part.
        separator = separators[0]
        for sep in separators:
            if sep in url:
                separator = sep
                break

        urlparts = url.rsplit(separator, 1)
        if len(urlparts) == 2:
            # Quote only the path part; re-attach the query suffix verbatim.
            url = quote(urlparts[0], safe=self.rootpath) + separator + urlparts[1]
        else:
            url = quote(urlparts[0], safe=self.rootpath)
        return url
Esempio n. 2
0
 def build_dlna_play_container(udn, server_type, path):
     s = "dlna-playcontainer://" + quote(udn)
     s += "?"
     s += 'sid=' + quote(server_type)
     s += '&cid=' + quote(path)
     s += '&md=0'
     return s
Esempio n. 3
0
    def append_url_with_template_parameters(url,
                                            parameters):
        """Substitute template parameters into the given url.

        Args:
            url (str): Query url containing ``{name}`` placeholders.
            parameters (dict): Mapping of placeholder name to value.

        Returns:
            str: URL with every placeholder replaced by its quoted value.

        Raises:
            ValueError: If *url* is None.
        """
        if url is None:
            raise ValueError("URL is None.")
        if parameters is None:
            return url

        for key, element in parameters.items():
            # None -> empty string; lists become '/'-joined path segments;
            # everything else is stringified and fully percent-encoded.
            if element is None:
                replacement = ""
            elif isinstance(element, list):
                replacement = "/".join(quote(str(item), safe='')
                                       for item in element)
            else:
                replacement = quote(str(element), safe='')
            url = url.replace('{{{0}}}'.format(key), str(replacement))

        return url
def get_branch_request(base_url, project, branch, auth=None):
    """Build a GET request for a single repository branch.

    Returns None when either *project* or *branch* is falsy; otherwise both
    are fully percent-encoded and spliced into the v3 API endpoint.
    """
    if not (project and branch):
        return None
    endpoint = '/api/v3/projects/{project}/repository/branches/{branch}'.format(
        project=quote(str(project), ''),
        branch=quote(str(branch), ''))
    return get_request('get', base_url, endpoint, auth=auth)
Esempio n. 5
0
	def deleteGroup(self, groupName, recursive=False):
		"""Remove the given group.

		Args:
		   groupName : name of the group
		   recursive : enforce recursive removal
		Returns:
		   nothing
		"""
		self.logger.debug('deleteGroup ' + groupName)
		# The group name is fully percent-encoded (no safe characters)
		# because it becomes a single path segment of the REST url.
		target = 'group/' + quote(groupName, '')
		if recursive:
			self._delete(target, params={'recursive': 'on'})
		else:
			self._delete(target)
Esempio n. 6
0
    def url(self, resource_name, resource_id=None, sub_resource=None,
            sub_resource_id=None, **kwargs):
        '''\
        Assemble a request url from path fragments and query parameters.

        Falsy fragments (None, empty) are dropped; any keyword arguments are
        percent-encoded into a query string.

        :returns: str\
        '''
        fragments = [self.base_url, resource_name, resource_id,
                     sub_resource, sub_resource_id]
        url = '/'.join(part for part in fragments if part) + '.json'
        if kwargs:
            pairs = [quote(str(k)) + '=' + quote(str(v))
                     for k, v in iteritems(kwargs)]
            url += '?' + '&'.join(pairs)
        return url
Esempio n. 7
0
    def wrapper(request, *args, **kwargs):
        """
        Wrap the function.

        If the request lacks the fresh-login marker parameter, the user is
        bounced through /logout with a quoted copy of the current path so
        that the login flow re-runs before the wrapped view executes.
        NOTE(review): `view` and FRESH_LOGIN_PARAMETER are presumably closed
        over from an enclosing decorator not visible here — confirm.
        """
        if not request.GET.get(FRESH_LOGIN_PARAMETER):
            # The enterprise_login_required decorator promises to set the fresh login URL
            # parameter for this URL when it was the agent that initiated the login process;
            # if that parameter isn't set, we can safely assume that the session is "stale";
            # that isn't necessarily an issue, though. Redirect the user to
            # log out and then come back here - the enterprise_login_required decorator will
            # then take effect prior to us arriving back here again.
            enterprise_customer = get_enterprise_customer_or_404(
                kwargs.get('enterprise_uuid'))
            provider_id = enterprise_customer.identity_provider or ''
            sso_provider = get_identity_provider(provider_id)
            if sso_provider:
                # Parse the current request full path, quote just the path portion,
                # then reconstruct the full path string.
                # The path and query portions should be the only non-empty strings here.
                scheme, netloc, path, params, query, fragment = urlparse(
                    request.get_full_path())
                redirect_url = urlunparse(
                    (scheme, netloc, quote(path), params, query, fragment))

                # The quoted current url rides along as ?redirect_url= so the
                # logout flow can return the user here afterwards.
                return redirect('{logout_url}?{params}'.format(
                    logout_url='/logout',
                    params=urlencode({'redirect_url': redirect_url})))
        return view(request, *args, **kwargs)
Esempio n. 8
0
    def wrapper(request, *args, **kwargs):
        """
        Wrap the decorator.

        Requires an 'enterprise_uuid' url kwarg (404 otherwise).  An
        anonymous user is redirected to /login with a ?next= url that carries
        a tpa_hint for the customer's identity provider plus the fresh-login
        marker, so the SSO pipeline returns here afterwards.
        NOTE(review): `view` and FRESH_LOGIN_PARAMETER come from an enclosing
        decorator scope not visible in this chunk — confirm.
        """
        if 'enterprise_uuid' not in kwargs:
            raise Http404

        enterprise_uuid = kwargs['enterprise_uuid']
        enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)

        # Now verify if the user is logged in. If user is not logged in then
        # send the user to the login screen to sign in with an
        # Enterprise-linked IdP and the pipeline will get them back here.
        if not request.user.is_authenticated:
            parsed_current_url = urlparse(request.get_full_path())
            # Preserve any existing query parameters while layering in the
            # SSO hint and the fresh-login marker.
            parsed_query_string = parse_qs(parsed_current_url.query)
            parsed_query_string.update({
                'tpa_hint': enterprise_customer.identity_provider,
                FRESH_LOGIN_PARAMETER: 'yes'
            })
            next_url = '{current_path}?{query_string}'.format(
                current_path=quote(parsed_current_url.path),
                query_string=urlencode(parsed_query_string, doseq=True))
            return redirect('{login_url}?{params}'.format(
                login_url='/login', params=urlencode({'next': next_url})))

        # Otherwise, they can proceed to the original view.
        return view(request, *args, **kwargs)
Esempio n. 9
0
    def test_create_folder(self):
        """End-to-end check of folder handling via the NextCloud WebDAV API.

        Creates a folder, verifies it is listed with the expected quoted
        href and collection resource type, builds a nested tree, confirms a
        duplicate create fails with the already-exists status, then deletes
        the folder.  Runs against the live self.nxc_local client fixture.
        """
        folder_name = "test folder5"
        res = self.nxc_local.create_folder(folder_name)
        assert res.is_ok
        assert res.raw.status_code == self.CREATED_CODE

        # test uploaded file can be found with list_folders
        # The expected href is the WebDAV path percent-encoded (the space in
        # the folder name becomes %20) with a trailing slash for collections.
        file_nextcloud_href = quote(
            os.path.join(WebDAV.API_URL, self.user_username,
                         folder_name)) + "/"
        folder_info = self.nxc_local.list_folders(path=folder_name)
        assert folder_info.is_ok
        assert len(folder_info.data) == 1
        assert isinstance(folder_info.data[0], dict)
        # check href
        assert folder_info.data[0]['href'] == file_nextcloud_href
        # check that created file type is a collection
        assert folder_info.data[0]['resource_type'] == self.COLLECTION_TYPE

        nested_folder_name = "test folder5/nested/folder"
        res = self.nxc_local.assure_tree_exists(nested_folder_name)
        folder_info = self.nxc_local.list_folders(path=nested_folder_name)
        assert folder_info.is_ok
        assert len(folder_info.data) == 1

        # check 405 status code if location already exists
        res = self.nxc_local.create_folder(folder_name)
        assert not res.is_ok
        assert res.raw.status_code == self.ALREADY_EXISTS_CODE

        # delete folder
        res = self.nxc_local.delete_path(folder_name)
        assert res.is_ok
        assert res.raw.status_code == self.NO_CONTENT_CODE
Esempio n. 10
0
    def get_by_name(self, name):
        """Get detailed info about specified project.

        The project name is fully percent-encoded (safe='') because it is a
        single path segment of the REST endpoint.
        """
        encoded_name = requests_utils.quote(name, safe='')
        request_path = self.api_path + encoded_name
        return self.connection.get_request(request_path)
Esempio n. 11
0
    def get_tags(self, name, limit=None, skip=None, pattern_dispatcher=None):
        """Get the tags for a project.

        :param name: Name of the project.
        :param limit: Int value that allows to limit the number of tags
                      to be included in the output results.
        :param skip: Int value that allows to skip the given number of tags
                     from the beginning of the list
        :param pattern_dispatcher: Pattern type (as a dict) with respective
                                   pattern value: {('match'|'regex') : value}
        :raises ValueError: if pattern_dispatcher contains neither 'match'
                            nor 'regex'.
        """

        # Maps the accepted pattern kinds onto their one-letter query keys.
        pattern_types = {'match': 'm', 'regex': 'r'}

        p, v = None, None
        if pattern_dispatcher is not None and pattern_dispatcher:
            # for/else: the else branch runs only when the loop finishes
            # without break, i.e. no recognized pattern key was supplied.
            for item in pattern_types:
                if item in pattern_dispatcher:
                    p, v = pattern_types[item], pattern_dispatcher[item]
                    break
            else:
                raise ValueError("Pattern types can be either "
                                 "'match' or 'regex'.")

        # Drop parameters that were not supplied.  Note the comprehension's
        # k, v deliberately shadow the outer p, v pair; (p, v) contributes
        # the pattern entry (or (None, None), filtered out below).
        params = {
            k: v
            for k, v in (('n', limit), ('s', skip), (p, v)) if v is not None
        }

        request_path = "{api_path}{name}/tags".format(
            api_path=self.api_path, name=requests_utils.quote(name, safe=''))
        return self.connection.get_request(request_path, params=params)
Esempio n. 12
0
def lookup_in_cdx(qurl, cdx_server='http://bigcdx:8080/data-heritrix'):
    """
    Checks if a resource is in the CDX index.
    :return: (warc file, compressed offset) of the first hit, or (None, None)
             when the lookup fails or yields no results.
    """
    query = "%s?q=type:urlquery+url:%s" % (cdx_server, quote(qurl))
    r = requests.get(query)
    print(r.url)
    logger.debug("Availability response: %d" % r.status_code)
    print(r.status_code, r.text)
    # Is it known, with a matching timestamp?
    if r.status_code == 200:
        try:
            dom = xml.dom.minidom.parseString(r.text)
            for result in dom.getElementsByTagName('result'):
                # Only the first <result> element is consulted.
                warc_file = result.getElementsByTagName(
                    'file')[0].firstChild.nodeValue
                offset = result.getElementsByTagName(
                    'compressedoffset')[0].firstChild.nodeValue
                return warc_file, offset
        except Exception as e:
            # Best-effort: malformed XML or missing elements are logged and
            # treated as "not found".
            logger.error("Lookup failed for %s!" % qurl)
            logger.exception(e)
    return None, None
Esempio n. 13
0
def main():
    """ Production call - loop through all Cascade CMS sites and 
    fix improper filenames, keeping a record of the resources changed in 
    a local JSON file """
    # Create rule enforcer.  The password is percent-encoded because it is
    # presumably embedded in a request URL by the enforcer — confirm.
    cpass = quote(os.environ.get('CASCADE_CMS_PASS', ''))
    cuser = os.environ.get('CASCADE_CMS_USER', '')
    restapi = os.environ.get('CASCADE_CMS_REST_API_BASE', '')
    rule_enforcer = CascadeCMSFileNameRuleEnforcer(cpass=cpass,
                                                   cuser=cuser,
                                                   restapi=restapi)
    site_dicts = []
    for s in rule_enforcer.get_all_sites():
        site_name = s['path']['path']
        site_id = s['id']
        # Start with the base of site_name/content. initialize a
        print(f"Beginning scan for invalid filenames in site {site_name}")
        site_dictionary = {
            site_name: {
                'bad_assets':
                rule_enforcer.traverse(
                    current_parent_folder=f'{site_name}/content',
                    site_full_assets_list=[],
                    skip_sites=[
                        "_Auto-Migrated Global_", "_skeleton.cofc.edu"
                    ])
            }
        }
        site_dictionary[site_name][
            'publish_result'] = rule_enforcer.publish_site(site_id)
        site_dicts.append(site_dictionary)
        # Rewrite the whole accumulated record after every site so a crash
        # mid-run still leaves the completed sites on disk.
        with open('site_read.json', 'w') as f:
            json.dump(site_dicts, f)
        print(f"Completed scan of site {site_name}")
Esempio n. 14
0
def escape(url):
    """Return *url* escaped per the http ascii convention.

    Example::

        >>> print(escape("http://www.vivaespaña.com/camión/"))
        http://www.vivaespa%C3%B1a.com/cami%C3%B3n/

    The characters in the module-level ``CHAR_TO_KEEP = "/:#&?="`` are the
    url delimiters that must NOT be escaped.
    """
    return quote(string=url, safe=CHAR_TO_KEEP)
Esempio n. 15
0
def lookup_in_cdx(qurl):
    """
    Checks if a resource is in the CDX index.
    :return: (file, compressedoffset) of the first hit, or (None, None) when
             the lookup fails or yields nothing.
    """
    # systems() presumably supplies the configured CDX server base url —
    # it is defined elsewhere in this project; confirm.
    query = "%s?q=type:urlquery+url:%s" % (systems().cdxserver, quote(qurl))
    r = requests.get(query)
    print(r.url)
    app.logger.debug("Availability response: %d" % r.status_code)
    print(r.status_code, r.text)
    # Is it known, with a matching timestamp?
    if r.status_code == 200:
        try:
            dom = xml.dom.minidom.parseString(r.text)
            # Returns on the FIRST <result> element only.
            for result in dom.getElementsByTagName('result'):
                file = result.getElementsByTagName('file')[0].firstChild.nodeValue
                compressedoffset = result.getElementsByTagName('compressedoffset')[0].firstChild.nodeValue
                return file, compressedoffset
        except Exception as e:
            # Malformed XML / missing elements are logged, then treated as
            # "not found" via the fall-through return below.
            app.logger.error("Lookup failed for %s!" % qurl)
            app.logger.exception(e)
        #for de in dom.getElementsByTagName('capturedate'):
        #    if de.firstChild.nodeValue == self.ts:
        #        # Excellent, it's been found:
        #        return
    return None, None
Esempio n. 16
0
 def read_queries_from_file(self):
     """
     read queries from file and qute it as urlencoded
     """
     with open(self.queries_file, 'r') as f:
         for line in f:
             self.queries.append(quote(line))
Esempio n. 17
0
    def get_dois_from_journal_issn(self,
                                   issn,
                                   rows=500,
                                   pub_after=2000,
                                   mailto="*****@*****.**"):
        '''
    Grabs a set of unique DOIs based on a journal ISSN using the CrossRef API

    :param issn: The ISSN of the journal
    :type issn: str

    :param rows: the maximum number of DOIs to find
    :type rows: int

    :param pub_after: the minimum publication year for DOIs returned
    :type pub_after: int

    :param mailto: mailto address for API
    :type mailto: str

    :returns: the unique set of DOIs as a list
    :rtype: list
    '''

        dois = []
        base_url = 'https://api.crossref.org/journals/' + issn + '/works?filter=from-pub-date:' + str(
            pub_after)
        max_rows = 1000  # Single-request cap defined by the CrossRef API

        # The User-agent mailto puts requests in CrossRef's "polite" pool.
        headers = {
            'Accept': 'application/json',
            'User-agent': 'mailto:' + mailto
        }

        if rows <= max_rows:  # No multi-query needed: one request suffices
            search_url = str(base_url) + '&rows=' + str(rows)
            response = requests.get(search_url,
                                    headers=headers,
                                    timeout=self.timeout_sec).json()

            for item in response["message"]["items"]:
                dois.append(item["DOI"])

        else:  # Need to split queries: page through with CrossRef cursors
            cursor = "*"
            keep_paging = True
            while (keep_paging):
                # Throttle between pages to stay within rate limits.
                sleep(self.sleep_sec)
                r = requests.get(base_url + "&rows=" + str(max_rows) +
                                 "&cursor=" + cursor,
                                 headers=headers,
                                 timeout=self.timeout_sec)
                # The next-cursor token must be percent-encoded before being
                # spliced back into the query string.
                cursor = quote(r.json()['message']['next-cursor'], safe='')
                # An empty page signals the end of the result set.
                if len(r.json()['message']['items']) == 0:
                    keep_paging = False

                for item in r.json()['message']['items']:
                    dois.append(item['DOI'])

        # set() de-duplicates DOIs that may appear on multiple pages.
        return list(set(dois))
def maze_Poster():
	"""Download TVMaze poster art for every title listed in the events file.

	Reads titles from ``pathLoc + "events"``, de-duplicates them, fetches the
	medium-size show image for any title whose poster jpg is not already on
	disk, appends a timestamped summary line to /tmp/up_report, then prunes
	broken images via brokenImageRemove().
	"""
	if os.path.exists(pathLoc + "events"):
		with open(pathLoc + "events", "r") as f:
			titles = f.readlines()

		# dict.fromkeys de-duplicates while preserving order.
		titles = list(dict.fromkeys(titles))
		downloaded = 0
		for raw_title in titles:
			title = raw_title.strip()
			dwnldFile = pathLoc + "poster/{}.jpg".format(title)
			if not os.path.exists(dwnldFile):
				url = "http://api.tvmaze.com/search/shows?q={}".format(quote(title))
				try:
					data = requests.get(url).json()
					image_url = data[0]['show']['image']['medium']
					if image_url:
						# BUG FIX: the original did
						#   w = open(...).write(...); w.close()
						# write() returns an int, so w.close() raised
						# AttributeError (silenced by the bare except) and the
						# file handle leaked.  A with-block closes it properly.
						with open(dwnldFile, 'wb') as poster:
							poster.write(requests.get(url=image_url, stream=True, allow_redirects=True).content)
						downloaded += 1
				except Exception:
					# Best-effort, as before: no search hit, missing image
					# key, or a network error just skips this title.  The
					# bare except is narrowed so Ctrl-C still works.
					pass

		now = datetime.now()
		dt = now.strftime("%d/%m/%Y %H:%M:%S")
		with open("/tmp/up_report", "a+") as f:
			f.write("maze_poster end : {} (downloaded : {})\n".format(dt, str(downloaded)))
		brokenImageRemove()
Esempio n. 19
0
def gplus_share(title):
    """Redirect to a Google+ share link for the item with *title*, recording
    the share in user_share when a user is logged in.

    NOTE(review): Python 2 code (print statement below).  The final
    render_template return is unreachable, and redirect(url[0]) raises
    IndexError when the title query matches no rows — confirm intent.
    """
    url = []
    cursor = g.conn.execute("SELECT content FROM item WHERE title = %s;",
                            title)
    for result in cursor:
        # The item's content column holds the shareable link target.
        url.append('https://plus.google.com/share?url=' + quote(result[0]))
    if 'email' in session:
        email = session['email']
        iid = []
        cursor = g.conn.execute("SELECT iid FROM item WHERE title = %s;",
                                title)
        for result in cursor:
            iid.append(result[0])
        sid = []
        cursor = g.conn.execute("SELECT sid FROM cover WHERE iid = %s;",
                                iid[0])
        for result in cursor:
            sid.append(result[0])
        g.conn = engine.connect()
        print datetime.datetime.now()
        g.conn.execute(
            "INSERT INTO user_share (share_time, platform, iid, sid, email) VALUES (%s, %s, %s, %s, %s);",
            (datetime.datetime.now(), 'google_plus', iid[0], sid[0], email))
        g.conn.close()
        gc.collect()
    return redirect(url[0])
    return render_template("index.html")
Esempio n. 20
0
def twitter_share(title):
    """Redirect to a Twitter share link for the item with *title*, recording
    the share in user_share when a user is logged in.

    NOTE(review): Python 2 code (print statement below).  The final
    render_template return is unreachable, and redirect(url[0]) raises
    IndexError when the title query matches no rows — confirm intent.
    """
    url = []
    cursor = g.conn.execute("SELECT content FROM item WHERE title = %s;",
                            title)
    for result in cursor:
        # Status text is pre-encoded ("Discovered via Newsie: "); only the
        # item link needs quoting here.
        url.append(
            'https://twitter.com/home?status=Discovered%20via%20Newsie%3A%20' +
            quote(result[0]))
    if 'email' in session:
        email = session['email']
        iid = []
        cursor = g.conn.execute("SELECT iid FROM item WHERE title = %s;",
                                title)
        for result in cursor:
            iid.append(result[0])
        sid = []
        cursor = g.conn.execute("SELECT sid FROM cover WHERE iid = %s;",
                                iid[0])
        for result in cursor:
            sid.append(result[0])
        g.conn = engine.connect()
        print datetime.datetime.now()
        g.conn.execute(
            "INSERT INTO user_share (share_time, platform, iid, sid, email) VALUES (%s, %s, %s, %s, %s);",
            (datetime.datetime.now(), 'twitter', iid[0], sid[0], email))
        g.conn.close()
        gc.collect()
    return redirect(url[0])
    return render_template("index.html")
Esempio n. 21
0
    def create_branch(self, name, branch_name, revision=None):
        """Create a new branch.

        :param name: Name of the project
        :param branch_name: Name of the branch
        :param revision: The base revision of the new branch. If not set,
                         HEAD will be used as base revision.
        :return: A BranchInfo entity that describes the created branch.
        """
        # Both path segments are fully percent-encoded (safe='').
        quoted_project = requests_utils.quote(name, safe='')
        quoted_branch = requests_utils.quote(branch_name, safe='')
        request_path = "%s%s/branches/%s" % (self.api_path, quoted_project,
                                             quoted_branch)
        return self.connection.put_request(request_path,
                                           json_data={'revision': revision})
Esempio n. 22
0
    def email_token(user):
        """Build an activation token for *user* and email them an activation
        link.

        The token is "<user id>/<epoch millis>" urlsafe-base64-encoded and
        then percent-encoded for inclusion in the link.  Re-raises any
        failure after logging it.
        NOTE(review): EMAIL_SUBJECT, TOKEN_LINK, SYS_DOMAIN and
        ActivationProcessor are module globals defined elsewhere — confirm.
        """
        try:
            now_millis = int(round(time.time() * 1000))
            plain_token = str(user.id) + "/" + str(now_millis)
            logging.debug('plain_token: "{}"'.format(plain_token))
            encoded_token = base64.urlsafe_b64encode(force_bytes(plain_token))
            logging.debug('encoded_token: "{}"'.format(encoded_token))
            # urlsafe base64 may still contain '=' padding, hence quote().
            token_str = quote(encoded_token.decode('utf-8'))
            logging.debug('token_str: "{}"'.format(token_str))

            msg = MIMEMultipart()
            msg['From'] = '"ETAbot" <*****@*****.**>'
            msg['To'] = user.email
            msg['Subject'] = EMAIL_SUBJECT
            hyper_link = TOKEN_LINK.format(SYS_DOMAIN, token_str)
            msg_body = render_to_string('acc_active_email.html', {
                'username': user.username,
                'link': hyper_link,
            })
            msg.attach(MIMEText(msg_body, 'html'))

            ActivationProcessor.send_email(msg)

            logging.info('Successfully sent activation email to User %s ' %
                         user.username)
        except Exception as ex:
            logging.error('Failed to send activation email to User %s: %s' %
                          (user.username, str(ex)))
            raise ex
Esempio n. 23
0
    def translate(self, word, **kwargs):
        """Translate *word* by scraping the provider's web page.

        Builds "<base>/<source>-<target>/<quoted word>", parses the response
        with BeautifulSoup, and collects anchor texts that link back to the
        reverse-direction page.  With return_all falsy the first match is
        returned as a string; with return_all truthy a list of non-trivial
        matches is returned.  Returns None implicitly when the payload fails
        validation.

        :raises ElementNotFoundInGetRequest: when no matching elements exist.
        """

        if self._validate_payload(word):
            url = "{}{}-{}/{}".format(self.__base_url, self._source,
                                      self._target, quote(word))
            response = requests.get(url)
            soup = BeautifulSoup(response.text, 'html.parser')
            elements = soup.findAll(self._element_tag, self._element_query)
            if not elements:
                raise ElementNotFoundInGetRequest(elements)

            eof = []
            for el in elements:
                temp = ''
                for e in el.findAll('a'):
                    if e.parent.name == 'div':
                        # A reverse-direction href marks a translation link.
                        if e and "/translate/{}-{}/".format(
                                self._target, self._source) in e.get('href'):
                            temp += e.get_text() + ' '
                            # Short-circuit on the first hit unless the
                            # caller asked for everything.
                            if not kwargs.get('return_all'):
                                return temp
                eof.append(temp)

            if 'return_all' in kwargs and kwargs.get('return_all'):
                return [word for word in eof if word and len(word) > 1]
Esempio n. 24
0
def get_fetch_plugin_versions_request(base_url, plugin_key, params=None):
    """Build a grequests GET for an add-on's version list.

    Returns None for a falsy *plugin_key*.  The original signature used a
    mutable default (``params={}``), which is shared across calls; None with
    a fallback is the safe equivalent.
    """
    if not plugin_key:
        return None
    plugin_key = quote(str(plugin_key), '')
    endpoint = ('/rest/2/addons/{plugin_key}/versions'.format(
        plugin_key=plugin_key))
    return grequests.get(base_url + endpoint, params=params if params is not None else {})
def get_fetch_plugin_versions_request(base_url, plugin_key, params=None):
    """Build a grequests GET for an add-on's version list (duplicate
    example of the function above).

    Returns None for a falsy *plugin_key*.  Replaces the mutable default
    argument ``params={}`` — shared between calls — with the None idiom.
    """
    if not plugin_key:
        return None
    plugin_key = quote(str(plugin_key), '')
    endpoint = ('/rest/2/addons/{plugin_key}/versions'
                .format(plugin_key=plugin_key))
    return grequests.get(base_url + endpoint, params=params if params is not None else {})
Esempio n. 26
0
def get_prev_close(symbol):
    """Fetch the previous close for *symbol* from the NSE quote endpoint.

    Scrapes the JSON payload embedded in the page's responseDiv element.

    :returns: a (symbol, previousClose) tuple.
    """
    base = ('https://www.nseindia.com/live_market/dynaContent/'
            'live_watch/get_quote/GetQuote.jsp?symbol=')
    response = requests.get(base + quote(symbol))
    payload = BeautifulSoup(response.text, 'html5lib').find(
        id="responseDiv").get_text()
    prev_close = json.loads(payload)['data'][0]['previousClose']
    return (symbol, prev_close)
Esempio n. 27
0
 def __str__(self):
     """Url-encode each of the arguments, and return a query fragment like `/field/operator value`."""
     # NOTE(review): this __str__ mutates self.value for numeric operators —
     # a side effect inside __str__; confirm callers rely on it.
     if self.operator in operator._NUMERIC:
         self.value = str(int(self.value))
     if self.operator in operator._TIME and self.field != 'timestamp':
         raise (ValueError(
             "Time operator '%s' can only be used with the 'timestamp' field."
             % self.operator))
     if self.operator in operator._BOOLEAN:  # Boolean operators don't include a value
         if self.value not in (None, ""):
             warnings.warn("Attempted to use boolean operator with a value")
         return '/' + quote(self.field, safe="") + '/' + quote(
             self.operator, safe="")
     else:
         # NOTE(review): the operator and value are concatenated with no
         # separator, while the docstring shows "operator value" — the
         # operator string presumably carries its own trailing space, or a
         # '%20' is missing here; confirm against the API's query grammar.
         return '/' + quote(self.field, safe="") + '/' + quote(
             self.operator, safe="") + quote(self.value, safe="")
Esempio n. 28
0
    def __init__(self, query, per_page=10, start_page=1, lazy=True, **kwargs):
        """
        Args:
            query(str):         The search query to execute.
            per_page(int):      The number of results to retrieve per page.
            start_page(int):    The starting page for queries.
            lazy(bool):         Don't execute the query until results are
                                requested. Defaults to True.
            **kwargs:           Arbitrary keyword arguments; only 'strict'
                                is consulted here.

        Raises:
            ValueError: Raised if per_page is less than 1 or greater than 100.
        """
        self._log = logging.getLogger('poogle')
        self._query = query
        self._search_url = self.SEARCH_URL + quote(query)

        if not 1 <= per_page <= 100:
            raise ValueError(
                'per_page must contain be an integer between 1 and 100')
        self._per_page = per_page

        # Paging / result-tracking state.
        self._lazy = lazy
        self._query_count = 0
        self.total_results = 0
        self._results = []
        self._current_page = start_page - 1
        self.last = None

        self.strict = kwargs.get('strict', False)

        # Eager mode fires the first query immediately.
        if not self._lazy:
            self.next_page()
Esempio n. 29
0
def get_triplestore_parent_department(body_uri, graph, print_urls=False):
    """Resolve the parent-department label for a reference.data.gov.uk body.

    NOTE(review): Python 2 code (print statement, basestring).  Relies on
    helpers defined elsewhere (get_value, match_to_dgu_dept).
    """
    # body_uri
    # http://reference.data.gov.uk/id/department/co
    # http://reference.data.gov.uk/id/public-body/consumer-focus
    body_type, body_name = \
        re.match('http://reference.data.gov.uk/id/(.*)/(.*)', body_uri).groups()
    # get
    # http://reference.data.gov.uk/2011-09-30/doc/public-body/appointments-commission.json
    url_base = 'http://reference.data.gov.uk/{graph}/doc/{body_type}/{body_name}.json'
    url = url_base.format(graph=graph,
                          body_type=body_type,
                          body_name=quote(body_name))
    if print_urls:
        print 'Getting: ', url
    response = requests.get(url)
    primary_topic = response.json()['result']['primaryTopic']
    if body_type == 'department':
        # in the spreadsheet, a department's parent is itself
        label = primary_topic['label'][0]
    else:
        parent_department = primary_topic['parentDepartment']
        if isinstance(parent_department,
                      basestring) and '/hta/' in parent_department:
            # file:///cygdrive/c/temp/data/hta/2012-09-30/300912-HTA-Organogram-V1.ttl
            label = 'Department of Health'
        else:
            label = get_value(parent_department,
                              dict_key='label',
                              list_index=0)
        # A URI instead of a human label means the lookup returned a
        # reference; map it to the known DGU department title.
        if 'http' in label:
            match = match_to_dgu_dept(label)
            label = match['title']
    return label
def get_fetch_plugin_license_request(base_url, plugin_key, auth=None):
    """Build a grequests GET for an add-on's license details.

    Returns None when *plugin_key* is falsy; otherwise the key is fully
    percent-encoded into the UPM endpoint path.
    """
    if not plugin_key:
        return None
    safe_key = quote(str(plugin_key), '')
    return grequests.get(
        base_url + '/rest/plugins/1.0/{0}/license'.format(safe_key),
        auth=auth)
Esempio n. 31
0
 def read_queries_from_file(self):
     """
     read queries from file and qute it as urlencoded
     """
     with open(self.queries_file, 'r') as f:
         for line in f:
             self.queries.append(quote(line))
Esempio n. 32
0
    def create_asset(self, name, label, content_type, download_url,
                     upload_url):
        """Create and upload a new asset for a mirror repository.

        The assets upload method is not supported by PyGitHub. This method
        downloads an asset from the original repository and makes a new asset
        upload in the mirrored repository by querying against the GitHub API
        directly.

        NOTE(review): Python 2 code (urllib2).  verify=False disables TLS
        certificate verification on the upload — confirm this is intended.
        """
        # The upload_url comes with an RFC 6570 template suffix
        # "{?name,label}" that must be stripped before substituting values.
        if label is None:
            upload_url = upload_url.replace("{?name,label}", "") + "?name={}"
            upload_url = upload_url.format(name)
        else:
            upload_url = upload_url.replace("{?name,label}", "") +\
                "?name={}&label={}"
            # Only the label is quoted; the asset name is used verbatim.
            upload_url = upload_url.format(name, quote(label))

        headers = {"Content-type": content_type,
                   "Authorization": "token {}".format(self.token)}

        # Pull the asset bytes from the source repo, then push to the mirror.
        data = urllib2.urlopen(download_url).read()

        res = requests.post(url=upload_url,
                            data=data,
                            headers=headers,
                            verify=False)
Esempio n. 33
0
def alert_hipchat(alert, metric, second_order_resolution_seconds):
    """Post a Mirage anomaly alert, with an embedded Graphite graph, to the
    HipChat rooms configured for this alert.

    No-op (beyond setting a local flag) when HIPCHAT_ENABLED is False.
    metric is indexed as (value, name); alert[0] selects the room set.
    """

    # SECOND_ORDER_RESOLUTION_SECONDS to hours so that Mirage surfaces the
    # relevant timeseries data in the graph
    second_order_resolution_in_hours = int(second_order_resolution_seconds) / 3600

    if settings.HIPCHAT_ENABLED:
        sender = settings.HIPCHAT_OPTS['sender']
        import hipchat
        hipster = hipchat.HipChat(token=settings.HIPCHAT_OPTS['auth_token'])
        rooms = settings.HIPCHAT_OPTS['rooms'][alert[0]]

        unencoded_graph_title = 'Skyline Mirage - ALERT at %s hours - anomalous data point - %s' % (
            second_order_resolution_in_hours, metric[0])
        # Fully percent-encode the title for use as a Graphite query param.
        graph_title_string = quote(unencoded_graph_title, safe='')
        graph_title = '&title=%s' % graph_title_string

        # Graphite render url, with or without an explicit port.
        if settings.GRAPHITE_PORT != '':
            link = '%s://%s:%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=orange' % (settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT, second_order_resolution_in_hours, metric[1], settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
        else:
            link = '%s://%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=orange' % (settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, second_order_resolution_in_hours, metric[1], settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
        embed_graph = "<a href='" + link + "'><img height='308' src='" + link + "'>" + metric[1] + "</a>"

        for room in rooms:
            hipster.method('rooms/message', method='POST', parameters={'room_id': room, 'from': 'skyline', 'color': settings.HIPCHAT_OPTS['color'], 'message': '%s - Mirage - Anomalous metric: %s (value: %s) at %s hours %s' % (sender, metric[1], metric[0], second_order_resolution_in_hours, embed_graph)})
    else:
        hipchat_not_enabled = True
Esempio n. 34
0
def forgot_password(request):
    """
    Email the user a password-reset link.

    Expects a JSON request body with an ``email`` key. Responds 400 when the
    email is missing or does not identify exactly one account, and reports
    mail-delivery failures with the mailer's status code.
    """
    payload = json.loads(request.body)
    email = payload.get('email')
    if not email:
        return create_json_response({}, status=400, reason='Email is required')

    matches = User.objects.filter(email__iexact=email)
    if matches.count() != 1:
        return create_json_response(
            {}, status=400, reason='No account found for this email')
    user = matches.first()

    # NOTE(review): the URL-quoted password hash appears to serve as the
    # reset token - confirm against the set_password view.
    email_content = """
        Hi there {full_name}--

        Please click this link to reset your seqr password:
        {base_url}users/set_password/{password_token}?reset=true
        """.format(
        full_name=user.get_full_name(),
        base_url=BASE_URL,
        password_token=quote(user.password, safe=''),
    )

    try:
        user.email_user(
            'Reset your seqr password', email_content, fail_silently=False)
    except AnymailError as e:
        return create_json_response(
            {},
            status=getattr(e, 'status_code', None) or 400,
            reason=str(e))

    return create_json_response({'success': True})
Esempio n. 35
0
    def recursive():
        """
        Poll the task API until the active 'Requester' task for this process
        appears, then submit it to move the flow on to the SCM reviewer.

        Uses ``process_id``, ``user_token`` and ``req_comment`` from the
        enclosing scope.

        :return: raw JSON text of the task query once a task was found
        """
        query = "folder=app:task:all&filter[name]=Requester&filter[state]=active&filter[definition_id]=%s&filter[process_id]=%s" % (
            os.getenv("DEFINITION_ID"), process_id)

        url = os.getenv("BASE_URL_TASK")+"?"+quote(query, safe="&=")
        r_get = requests.get(url, headers={
                             "Content-Type": "Application/json", "Authorization": "Bearer %s" % user_token})
        result = json.loads(r_get.text)
        print(r_get.text)
        print("loading")
        if result['data'] is None or len(result['data']) == 0:
            print("masuk if")
            # BUG FIX: propagate the retry's result - the recursive call's
            # return value was previously discarded, so the caller got None.
            # NOTE(review): unbounded recursion with no delay can raise
            # RecursionError on a long wait; consider a loop with a sleep.
            return recursive()
        else:
            print("masuk else")
            # get scm email and task id
            SCM_email = result['data'][0]['form_data']['pVSCM']
            task_id = result['data'][0]['id']
            print(SCM_email)
            # move the flow from the requester to the SCM
            submit_data = {
                "data": {
                    "form_data": {
                        "pVSCM": SCM_email

                    },
                    "comment": req_comment
                }
            }

            r_post = requests.post(os.getenv("BASE_URL_TASK") + "/" + task_id + "/submit", data=json.dumps(submit_data), headers={
                "Content-Type": "application/json", "Authorization": "Bearer %s" % user_token})
            result = json.loads(r_post.text)
            print(result)
            return r_get.text
Esempio n. 36
0
def getProgressBar():
    """
    Return the active tasks for the current user's role as raw JSON from the
    task API, with HTTP status 200.

    The caller's identity comes from the JWT in the Authorization header; the
    task-name filter is derived from the user's stored role.
    """
    # BUG FIX: PyJWT's decode() takes the plural ``algorithms`` keyword - the
    # misspelled ``algorithm`` kwarg is ignored by old PyJWT and rejected by 2.x
    decoded = jwt.decode(request.headers["Authorization"], jwtSecretKey, algorithms=['HS256'])
    username = decoded['username']
    searchToken = User.query.filter_by(user_name=username).first()
    user_token = searchToken.token

    userRole = Roles.query.filter_by(id=searchToken.position_id).first()

    # Map the stored role onto the task name used by the workflow engine
    if userRole.role == "Requester":
        position = "Requester"
    elif userRole.role == "SCM":
        position = "SCM Reviewer"
    elif userRole.role == "Manager":
        position = "Manager Approval"
    elif userRole.role == "Contract Owner":
        position = "Contract Owner Approval"
    else:
        # BUG FIX: an unrecognised role previously left ``position`` unbound
        # and crashed below with NameError
        return "Unknown role: %s" % userRole.role, 400

    query = "folder=app:task:all&page[number]=1&page[size]=10&filter[name]=%s&filter[state]=active&filter[definition_id]=%s" % (position, os.getenv("DEFINITION_ID"))
    url = os.getenv("BASE_URL_TASK")+"?"+quote(query, safe="&=")

    r_get = requests.get(url, headers={
        "Content-Type": "Application/json", "Authorization": "Bearer %s" % user_token
    })
    return r_get.text, 200
Esempio n. 37
0
    def __init__(self, query, per_page=10, start_page=1, lazy=True, **kwargs):
        """
        Args:
            query(str):         The search query to execute.
            per_page(int):      The number of results to retrieve per page.
            start_page(int):    The starting page for queries.
            lazy(bool):         Don't execute the query until results are requested. Defaults to True.
            **kwargs:           Arbitrary keyword arguments, refer to the documentation for more information.

        Raises:
            ValueError: Raised if the supplied per_page argument is less than 1 or greater than 100
        """
        self._log = logging.getLogger('poogle')

        self._query        = query
        self._search_url   = self.SEARCH_URL + quote(query)

        if (per_page < 1) or (per_page > 100):
            # BUG FIX: the message previously read "must contain be an integer"
            raise ValueError('per_page must be an integer between 1 and 100')
        self._per_page     = per_page

        self._lazy         = lazy
        self._query_count  = 0
        self.total_results = 0

        self._results      = []
        # Pages are advanced before fetching, so start one behind
        self._current_page = start_page - 1
        self.last          = None

        self.strict = kwargs.get('strict', False)

        # Eager mode fetches the first page immediately
        if not self._lazy:
            self.next_page()
Esempio n. 38
0
        def recursive():
            """
            Poll the task API until the active 'Contract Owner Approval' task
            for this process appears, then submit it to advance the flow.

            Uses ``userDB``, ``process_id`` and ``req_comment`` from the
            enclosing scope.

            :return: raw JSON text of the task query once a task was found,
                or None when ``userDB`` is None
            """
            if userDB is not None:
                user_token = userDB.token
                query = "folder=app:task:all&page[number]=1&page[size]=10&filter[name]=Contract Owner Approval&filter[state]=active&filter[process_id]=%s&filter[definition_id]=%s" % (process_id,os.getenv("DEFINITION_ID"))
                url = os.getenv("BASE_URL_TASK")+"?"+quote(query, safe="&=")

                r_get = requests.get(url, headers={
                    "Content-Type": "Application/json", "Authorization": "Bearer %s" % user_token
                })
                result = json.loads(r_get.text)
                print("loading")

                if result['data'] is None or len(result['data']) == 0:
                    # BUG FIX: propagate the retry's result - the recursive
                    # call's return value was previously discarded, so the
                    # caller got None even after a task was eventually found
                    return recursive()
                else:
                    # get task id
                    task_id = result['data'][0]['id']

                    # move the flow onward with an empty form and the comment
                    submit_data = {
                        "data": {
                            "form_data": {


                            },
                            "comment": req_comment
                        }
                    }

                    r_post = requests.post(os.getenv("BASE_URL_TASK") + "/" + task_id + "/submit", data=json.dumps(submit_data), headers={
                        "Content-Type": "application/json", "Authorization": "Bearer %s" % user_token})
                    result = json.loads(r_post.text)
                    print(result)
                    return r_get.text
Esempio n. 39
0
def reviset_list():
    """
    List the contracts whose active task sits with the requester (i.e. sent
    back for revision).

    :return: JSON array of objects with keys "sap contract number",
        "requester" and "vendor name"
    """
    # BUG FIX: PyJWT's decode() takes the plural ``algorithms`` keyword - the
    # misspelled ``algorithm`` kwarg is ignored by old PyJWT and rejected by 2.x
    decoded = jwt.decode(request.headers["Authorization"], jwtSecretKey, algorithms=['HS256'])
    email = decoded["email"]
    userDB = User.query.filter_by(email=email).first()
    user_token = userDB.token

    query = "folder=app:task:all&filter[name]=Requester&filter[state]=active&filter[definition_id]=%s" % (
            os.getenv("DEFINITION_ID"))

    url = os.getenv("BASE_URL_TASK")+"?"+quote(query, safe="&=")
    r_get = requests.get(url, headers={"Content-Type": "Application/json", "Authorization": "Bearer %s" % user_token})
    result = json.loads(r_get.text)

    to_revise = []
    for po in result['data']:
        process_id = po['process_id']
        contractDB = Contract.query.filter_by(process_id=process_id).first()
        # NOTE(review): assumes a Contract row exists for every process_id -
        # a missing row would raise AttributeError here; confirm the invariant
        userDB = User.query.filter_by(id=contractDB.user_id).first()
        to_revise.append({
            "sap contract number": contractDB.SAP_contract_number,
            "requester": userDB.user_name,
            "vendor name": contractDB.vendor_name,
        })

    return json.dumps(to_revise)
Esempio n. 40
0
    def _request(self,
                 method,
                 uri,
                 path_params=None,
                 flatten_params=True,
                 **kwargs):
        """Issue an HTTP request against the configured endpoint.

        ``path_params`` values are URL-encoded and interpolated into ``uri``
        via %-formatting; nested ``params`` are flattened unless
        ``flatten_params`` is False. The response is logged (at error level
        too for non-2xx codes) and returned unchanged.
        """
        if path_params:
            # Ensure every path parameter is percent-encoded before it is
            # interpolated into the URI template.
            encoded = {}
            for name, raw in path_params.items():
                encoded[name] = quote(str(raw), safe=u'')
            uri %= encoded

        # Custom nested object flattening
        if flatten_params and 'params' in kwargs:
            kwargs['params'] = self._flatten_param(kwargs['params'])

        response = self._session.request(method, self._endpoint + uri, **kwargs)

        log_message = format_request(response)
        logging.info(log_message)
        if response.status_code < 200 or response.status_code > 299:
            logging.error(log_message)

        return response
Esempio n. 41
0
 def get_balance(self):
     """Collect the account balance for each child account without recorded
     cost, appending a row per account to ``self.balance_data``.

     Raises Exception when the child-account fetch or a balance request
     reports failure; returns the failing response dict when the balance
     API itself reports success=False.
     """
     res = self.get_child_accounts()
     if not res.get('succ'):
         raise Exception(res.get('msg'))
     # Keep only accounts that appear at least twice in none_cost, then
     # de-duplicate. NOTE(review): intent of the >=2 occurrence rule is not
     # evident from this method alone - confirm against the producer of
     # self.none_cost.
     for x in self.none_cost.copy():
         if self.none_cost.count(x) < 2:
             self.none_cost.remove(x)
     self.none_cost = list(set(self.none_cost))
     for acct, company in self.child_accounts:
         if acct not in self.none_cost:
             logger.info('skip has cost: %s' % acct)
             continue
         # The account name goes into both the API URL and its referer
         quote_acct = quote(acct)
         url = 'https://e.yunos.com/api/member/balance?identity=%s' % quote_acct
         ref = 'https://e.yunos.com/?identity=%s' % quote_acct
         res = self.deal_result(self.execute('GET', url, referer=ref),
                                json_str=True)
         if not res.get('succ'):
             raise Exception(res.get('msg'))
         if not res.get('msg').get('success'):
             logger.error('balance res: %s' % res.get('msg'))
             return res
         # API reports the balance in cents; convert to currency units
         balance = res.get('msg').get('data') / 100
         bd = {'账号': acct, '账户余额': balance}
         logger.info(bd)
         self.balance_data.append(bd)
Esempio n. 42
0
def alert_hipchat(alert, metric, second_order_resolution_seconds):
    """
    Called by :func:`~trigger_alert`; posts an anomaly alert, with an
    embedded Graphite graph, to the hipchat room(s) configured in
    settings.py. Returns False when hipchat alerting is disabled.
    """

    # Express SECOND_ORDER_RESOLUTION_SECONDS in hours so that Mirage
    # surfaces the relevant window of timeseries data in the graph
    second_order_resolution_in_hours = int(second_order_resolution_seconds) / 3600

    if not settings.HIPCHAT_ENABLED:
        return False

    import hipchat
    sender = settings.HIPCHAT_OPTS['sender']
    hipster = hipchat.HipChat(token=settings.HIPCHAT_OPTS['auth_token'])
    rooms = settings.HIPCHAT_OPTS['rooms'][alert[0]]

    unencoded_graph_title = 'Skyline Mirage - ALERT at %s hours - anomalous data point - %s' % (
        second_order_resolution_in_hours, metric[0])
    graph_title = '&title=%s' % quote(unencoded_graph_title, safe='')

    if settings.GRAPHITE_PORT != '':
        link = '%s://%s:%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=orange' % (settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT, second_order_resolution_in_hours, metric[1], settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
    else:
        link = '%s://%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=orange' % (settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, second_order_resolution_in_hours, metric[1], settings.GRAPHITE_GRAPH_SETTINGS, graph_title)
    embed_graph = "<a href='" + link + "'><img height='308' src='" + link + "'>" + metric[1] + "</a>"

    for room in rooms:
        hipster.method('rooms/message', method='POST', parameters={'room_id': room, 'from': 'skyline', 'color': settings.HIPCHAT_OPTS['color'], 'message': '%s - Mirage - Anomalous metric: %s (value: %s) at %s hours %s' % (sender, metric[1], metric[0], second_order_resolution_in_hours, embed_graph)})
Esempio n. 43
0
def list_from_cdx(qurl):
    """
    Look a URL up in the CDX index.

    Issues a ``type:urlquery`` query against CDX_SERVER and, on a 200
    response, parses the XML result records.

    :return: an OrderedDict keyed by capture timestamp; each value is a
        ``(warc_file, compressed_offset, compressed_end_offset)`` tuple,
        where the end offset is None when not reported
    """
    query = "%s?q=type:urlquery+url:%s" % (CDX_SERVER, quote(qurl))
    logger.info("Querying: %s" % query)
    response = requests.get(query)
    logger.info("Availability response: %d" % response.status_code)
    matches = OrderedDict()
    # Is it known, with a matching timestamp?
    if response.status_code == 200:
        try:
            dom = xml.dom.minidom.parseString(response.text)
            for record in dom.getElementsByTagName('result'):
                warc_file = record.getElementsByTagName('file')[0].firstChild.nodeValue
                offset = record.getElementsByTagName('compressedoffset')[0].firstChild.nodeValue
                capture_date = record.getElementsByTagName('capturedate')[0].firstChild.nodeValue
                # The compressed record length is optional in the response:
                end_elems = record.getElementsByTagName('compressedendoffset')
                end_offset = end_elems[0].firstChild.nodeValue if end_elems else None
                matches[capture_date] = warc_file, offset, end_offset
        except Exception as e:
            logger.error("Lookup failed for %s!" % qurl)
            logger.exception(e)

    return matches
def parse_hit(hit: AttrDict, highlighting: bool = True) -> Dict[str, Any]:
    """Convert an elasticsearch hit into a plain dict for rendering.

    Derives a dashed document type from the doc_type, resolves the detail
    URL, and (optionally) attaches escaped highlight data; file hits with an
    extracted highlight get a pdfjs search fragment appended to their URL.
    """
    parsed = hit.to_dict()  # carries name and reference_number when available
    doc_type = hit.meta.doc_type.replace("_document", "").replace("_", "-")
    parsed["type"] = doc_type
    parsed["type_translated"] = DOCUMENT_TYPE_NAMES[doc_type]
    parsed["url"] = reverse(doc_type, args=[hit.id])
    parsed["score"] = hit.meta.score

    if highlighting:
        highlights = get_highlights(hit, parsed)
        if highlights:
            first = highlights[0]
            parsed["highlight"] = html_escape_highlight(first)
            # Text between the first <mark>...</mark> pair
            parsed["highlight_extracted"] = first.split("<mark>")[1].split("</mark>")[0]
        else:
            parsed["highlight"] = None
            parsed["highlight_extracted"] = None

        if hit.type == "file" and hit.highlight_extracted:
            parsed["url"] += "?pdfjs_search=" + quote(parsed["highlight_extracted"])

    parsed["name_escaped"] = html_escape_highlight(parsed["name"])
    parsed["reference_number_escaped"] = html_escape_highlight(
        parsed.get("reference_number"))

    return parsed
Esempio n. 45
0
    def get_context_data(self, **kwargs):
        """Add the project and, for generic build errors only, a prefilled
        GitHub "new issue" URL (``issue_url``) to the template context."""
        context = super().get_context_data(**kwargs)
        context['project'] = self.project

        build = self.get_object()
        if build.error != BuildEnvironmentError.GENERIC_WITH_BUILD_ID.format(build_id=build.pk):
            # Do not suggest to open an issue if the error is not generic
            return context

        # URL template; title/body placeholders are filled (quoted) below
        scheme = (
            'https://github.com/rtfd/readthedocs.org/issues/new'
            '?title={title}{build_id}'
            '&body={body}'
        )

        # TODO: we could use ``.github/ISSUE_TEMPLATE.md`` here, but we would
        # need to add some variables to it which could impact in the UX when
        # filling an issue from the web
        body = """
        ## Details:

        * Project URL: https://readthedocs.org/projects/{project_slug}/
        * Build URL(if applicable): https://readthedocs.org{build_path}
        * Read the Docs username(if applicable): {username}

        ## Expected Result

        *A description of what you wanted to happen*

        ## Actual Result

        *A description of what actually happened*""".format(
            project_slug=self.project,
            build_path=self.request.path,
            username=self.request.user,
        )

        # Percent-encode the free-text parts; build_id is numeric and safe
        scheme_dict = {
            'title': quote('Build error with build id #'),
            'build_id': context['build'].id,
            'body': quote(textwrap.dedent(body)),
        }

        issue_url = scheme.format(**scheme_dict)
        # Round-trip through urlparse to normalise the final URL
        issue_url = urlparse(issue_url).geturl()
        context['issue_url'] = issue_url
        return context
def login():
    """
    Initiate login attempt
    :return: Redirects to GitHub for login
    """
    # BUG FIX: redirect_uri is a query-string value, so it must be fully
    # percent-encoded (including ':' and '/'); quote()'s default safe='/'
    # left slashes unencoded in the parameter value
    return redirect("https://github.com/login/oauth/authorize?client_id=" +
                    GITHUB_CLIENT_ID + "&redirect_uri=" +
                    quote(OAUTH_REDIRECT_URI, safe=''), code=302)
Esempio n. 47
0
	def getGroup(self, groupName='/'):
		"""Return all members and subgroups of a given group.

		Args:
		   groupName : name of the group, default is root
		Returns:
		   all members and subgroups of the given group
		"""
		self.logger.debug('getGroup ' + groupName)
		# Encode everything, including '/', since the group name is a
		# single path segment of the REST resource
		encoded = quote(groupName, '')
		return self._get('group/' + encoded)
Esempio n. 48
0
	def createGroup(self, groupName):
		"""Create a new group. The created group will be empty.

		Args:
		   groupName : name of the group
		Returns:
		   nothing
		"""
		self.logger.debug('createGroup ' + groupName)
		# '/' is encoded too: the name is one path segment of the resource
		target = 'group/' + quote(groupName, '')
		self._post(target)
Esempio n. 49
0
def document_available(wayback_url, url, ts):
    """
    Queries Wayback to see if the content is there yet.

    Issues an xmlquery request, e.g.

    http://192.168.99.100:8080/wayback/xmlquery.jsp?type=urlquery&url=https://www.gov.uk/...

    and scans the XML response's <capturedate> elements for an exact match
    with ``ts``. Any failure (network, parse, ...) is logged and treated as
    "not available".

    :return: True when a capture dated ``ts`` exists, False otherwise
    """
    try:
        wburl = '%s/xmlquery.jsp?type=urlquery&url=%s' % (wayback_url, quote(url))
        logger.debug("Checking %s" % wburl)
        resp = requests.get(wburl)
        logger.debug("Response: %d" % resp.status_code)
        # Is it known, with a matching timestamp?
        if resp.status_code == 200:
            dom = xml.dom.minidom.parseString(resp.text)
            for node in dom.getElementsByTagName('capturedate'):
                if node.firstChild.nodeValue == ts:
                    # Excellent, it's been found:
                    return True
    except Exception as e:
        logger.error("%s [%s %s]" % (str(e), url, ts))
        logger.exception(e)
    # Otherwise:
    return False
Esempio n. 50
0
 def test_prepare_http_request_correct(self):
     """
     prepare_http_request must build the Google Books volumes URL with the
     keyword percent-encoded, for a range of representative keywords.
     """
     keywords = ("fluent python", "hello", "@#$%&&&&&", "12eee@#@#")
     for keyword in keywords:
         built = prepare_http_request(keyword=keyword)
         wanted = "https://www.googleapis.com/books/v1/volumes?q=%s" % (quote(keyword))
         self.assertEqual(built, wanted)
Esempio n. 51
0
    def request_vpn(self):
        """Request the VPN configuration for the logged-in user.

        Posts the url-quoted credential string as the ``st`` parameter and
        returns the LongBase64-decoded response body.

        Raises:
            RequestVpnFailed: when called before a successful login.
        """
        if not self.user:
            raise RequestVpnFailed('Not Login yet.')
        codec = LongBase64()
        vpnparams = 'username=%s&password=%s&flashInfo=%s' % (
            self.user['stcode'], self.user['userpassword'], self.user['sxcode'])
        encoded = quote(vpnparams)

        vpn_resp = self.s.post(self._vpn_uri, params={'st': encoded})
        return codec.decodestring(vpn_resp.content)
Esempio n. 52
0
def genderfromname(name, tries=3):
    """
    Guess the gender of a (Korean) name by scraping erumy.com, caching the
    verdict in the local MongoDB ``commentdb.namegender`` collection.

    :param name: the name to analyse
    :param tries: remaining retry attempts when the lookup fails
    :return: True for male, False for female, "Unclear" when ambiguous,
        "Stopped" when the lookup could not be completed
    """
    testurl = 'http://www.erumy.com/nameAnalyze/AnalyzeMyName.aspx?name='
    testurl = testurl + quote(name)

    try:
        c = MongoClient(host='localhost', port=27017)
        print("Connected successfully")
    except ConnectionFailure:
        sys.stderr.write("Connection failed.")
        sys.exit(1)
    dbh = c['commentdb']
    try:
        checkfirst = dbh.namegender.find_one({'name': name})
    except UnicodeError:
        sys.stderr.write("Failed to read characters.")
        return "Stopped"

    if checkfirst:
        # BUG FIX: the cache stores 'male'/'female'/'unclear' but the scrape
        # path below returns True/False/"Unclear". Returning the raw string
        # made every cached answer truthy ('female' is a non-empty string),
        # inverting the meaning for women. Map back to the scrape-path values.
        return {'male': True, 'female': False}.get(checkfirst['gender'], "Unclear")

    men = 0
    women = 0
    try:
        with urllib.request.urlopen(testurl) as response:
            with codecs.open('test2.html', encoding='utf-8', mode='w') as writefile:
                writefile.write(response.read().decode('utf-8'))

        # (the original re-read and re-wrote test2.html unchanged here;
        # that round-trip was a no-op and has been dropped)
        with codecs.open('test2.html', encoding='utf-8', mode='r') as readfile:
            contents = readfile.read().split('\n')
            for line in contents:
                # The result page shows gender likelihood as image bars
                if '남자' in line and '<img src' in line:
                    men = grablength(line)
                if '여자' in line and '<img src' in line:
                    women = grablength(line)
        print(men, women)

        if men > women * 2:
            dbh.namegender.insert({'name': name, 'gender': 'male'})
            return True  # return true for man
        elif men * 2 < women:
            dbh.namegender.insert({'name': name, 'gender': 'female'})
            return False  # return false for women
        dbh.namegender.insert({'name': name, 'gender': 'unclear'})
        return "Unclear"
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt
        if tries > 0:
            return genderfromname(name, tries - 1)
        else:
            return "Stopped"
Esempio n. 53
0
def alert_hipchat(datapoint, metric_name, expiration_time, metric_trigger, algorithm):
    """
    Send a Skyline Boundary anomaly alert, with an embedded Graphite graph,
    to the hipchat room(s) configured in settings.py.

    Rooms are looked up first by the exact metric namespace and, failing
    that, by regex-matching each configured room pattern against
    ``metric_name``.

    NOTE(review): ``graphite_previous_hours`` and
    ``graphite_graph_line_color`` are read from module scope - confirm they
    are defined in this module.

    :return: ``False`` when hipchat alerting is not enabled
    """
    if not settings.HIPCHAT_ENABLED:
        return False

    sender = settings.BOUNDARY_HIPCHAT_OPTS['sender']
    import hipchat
    hipster = hipchat.HipChat(token=settings.BOUNDARY_HIPCHAT_OPTS['auth_token'])

    # Allow for absolute path metric namespaces but also allow for and match
    # match wildcard namepaces if there is not an absolute path metric namespace
    rooms = 'unknown'
    notify_rooms = []
    matched_rooms = []
    try:
        rooms = settings.BOUNDARY_HIPCHAT_OPTS['rooms'][metric_name]
        notify_rooms.append(rooms)
    except KeyError:
        # BUG FIX: was a bare ``except:`` - only a missing exact-namespace
        # key should fall back to wildcard pattern matching
        for room in settings.BOUNDARY_HIPCHAT_OPTS['rooms']:
            print(room)
            check_match_pattern = re.compile(room)
            if check_match_pattern.match(metric_name):
                matched_rooms.append(room)

    if matched_rooms:
        for i_metric_name in matched_rooms:
            notify_rooms.append(settings.BOUNDARY_HIPCHAT_OPTS['rooms'][i_metric_name])

    alert_algo = str(algorithm)
    alert_context = alert_algo.upper()

    unencoded_graph_title = 'Skyline Boundary - %s at %s hours - %s - %s' % (
        alert_context, graphite_previous_hours, metric_name, datapoint)
    graph_title_string = quote(unencoded_graph_title, safe='')
    graph_title = '&title=%s' % graph_title_string

    if settings.GRAPHITE_PORT != '':
        link = '%s://%s:%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=%s' % (
            settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, settings.GRAPHITE_PORT,
            graphite_previous_hours, metric_name, settings.GRAPHITE_GRAPH_SETTINGS,
            graph_title, graphite_graph_line_color)
    else:
        link = '%s://%s/render/?from=-%shour&target=cactiStyle(%s)%s%s&colorList=%s' % (
            settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST, graphite_previous_hours,
            metric_name, settings.GRAPHITE_GRAPH_SETTINGS, graph_title,
            graphite_graph_line_color)

    embed_graph = "<a href='" + link + "'><img height='308' src='" + link + "'>" + metric_name + "</a>"

    for rooms in notify_rooms:
        for room in rooms:
            hipster.method('rooms/message', method='POST', parameters={'room_id': room, 'from': 'skyline', 'color': settings.BOUNDARY_HIPCHAT_OPTS['color'], 'message': '%s - Boundary - %s - Anomalous metric: %s (value: %s) at %s hours %s' % (sender, algorithm, metric_name, datapoint, graphite_previous_hours, embed_graph)})
Esempio n. 54
0
    def append_url_with_query_parameters(url,
                                         parameters,
                                         array_serialization="indexed"):
        """Adds query parameters to a URL.

        Args:
            url (str): The URL string.
            parameters (dict): The query parameters to add to the URL.
                Keys mapped to None are skipped; list values are serialized
                per ``array_serialization``.
            array_serialization (str): The format of array parameter
                serialization ("indexed", "csv", "psv" or "tsv").

        Returns:
            str: URL with added query parameters.

        Raises:
            ValueError: If the given url is None.

        """
        # Parameter validation
        if url is None:
            raise ValueError("URL is None.")
        if parameters is None:
            return url

        for key, value in parameters.items():
            # '?' introduces the first parameter, '&' every later one
            separator = '&' if '?' in url else '?'
            if value is None:
                continue
            if isinstance(value, list):
                # Drop falsy elements before serializing the array
                value = [element for element in value if element]
                # BUG FIX: these comparisons previously used ``is`` against
                # string literals, which only works via CPython string
                # interning (and is a SyntaxWarning on 3.8+); use ``==``
                if array_serialization == "csv":
                    url += "{0}{1}={2}".format(separator, key,
                        ",".join(quote(str(x), safe='') for x in value))
                elif array_serialization == "psv":
                    url += "{0}{1}={2}".format(separator, key,
                        "|".join(quote(str(x), safe='') for x in value))
                elif array_serialization == "tsv":
                    url += "{0}{1}={2}".format(separator, key,
                        "\t".join(quote(str(x), safe='') for x in value))
                else:
                    url += "{0}{1}".format(separator,
                        "&".join(("{0}={1}".format(k, quote(str(v), safe='')))
                            for k, v in APIHelper.serialize_array(key, value, array_serialization)))
            else:
                url += "{0}{1}={2}".format(separator, key, quote(str(value), safe=''))

        return url
def fetch_plugin_versions(base_url, plugin_key, params=None):
    """Fetch the marketplace versions of an addon.

    Args:
        base_url: base URL of the REST service.
        plugin_key: addon key; falsy values short-circuit to None.
        params: optional query parameters for the request.
            BUG FIX: this was a mutable default ``{}``, shared between calls.

    Returns:
        The list of embedded version dicts, or None when the plugin key is
        falsy or the request fails.
    """
    if not plugin_key:
        return None
    if params is None:
        params = {}
    # The key is a single path segment, so encode '/' as well
    plugin_key = quote(str(plugin_key), '')
    endpoint = ('/rest/2/addons/{plugin_key}/versions'
                .format(plugin_key=plugin_key))
    response = do_request('get', base_url, endpoint, params)
    if not response.ok:
        return None

    return response.json()['_embedded']['versions']
Esempio n. 56
0
def search(query, starting_index):
    """Run a search against the Google Web Search JSON API.

    NOTE(review): uses the Python 2 ``urllib2`` module and the long-retired
    Google Web Search API - confirm this module still targets Python 2.

    :param query: search terms; percent-encoded into the request URL
    :param starting_index: offset of the first result to return
    :return: list of dicts with 'title', 'url' and 'description', with any
        HTML tags stripped from the values
    """
    resp = json.loads(urllib2.urlopen(GOOGLE_SEARCH_API.format(query=quote(query, safe=''), starting_index=starting_index)).read())
    return [
        {
            # Escape HTML tags out
            'title': re.sub('<[^<]+?>', '', result['title']),
            'url': re.sub('<[^<]+?>', '', result['url']),
            'description': re.sub('<[^<]+?>', '', result['content']),
        }
        for result in resp['responseData']['results']
    ]
def fetch_plugin_license(base_url, plugin_key, auth=None):
    """Fetch the license of an installed plugin via the UPM REST API.

    Returns the license JSON dict, or None when the plugin key is falsy or
    the request fails.
    """
    if not plugin_key:
        return None
    # The key is a single path segment, so encode '/' as well
    encoded_key = quote(str(plugin_key), '')
    endpoint = '/rest/plugins/1.0/{plugin_key}/license'.format(
        plugin_key=encoded_key)
    response = do_request('get', base_url, endpoint, auth=auth)
    if not response.ok:
        return None

    return response.json()
def blacklist(url):
    """Check *url* against the Google Safe Browsing v3.1 lookup API.

    :return: -1 when Google flags the URL as phishing or malware, 1 otherwise
    """
    encoded = quote(url, safe="")
    request_url = "https://sb-ssl.google.com/safebrowsing/api/lookup?client=skripsi_phishing&key=" \
                  + key_api + "&appver=1.0.0&pver=3.1&url=" + encoded

    verdict = requests.get(request_url, headers=headers).text

    if verdict in ("phishing", "malware"):
        return -1
    return 1
Esempio n. 59
0
 def collectionget(self, name, hidemissing=False):
     """Fetch a collection from the test server, polling the JSON API until
     its status is 'ready', and return the parsed response dict.

     :param name: collection name (URL-quoted into the request path)
     :param hidemissing: forwarded as a lowercase query-string flag
     """
     cstatus = None
     # Keep re-fetching until the collection reports status 'ready'
     while cstatus != 'ready':
         resp = yield self.http_client \
             .fetch(self.get_url('/api/collection/{}.json?hidemissing={}'
                                 .format(
                                     quote(name),
                                     str(hidemissing).lower())))
         self.assertEqual(resp.code, 200)
         c = json.loads(resp.body.decode('utf-8'))
         cstatus = c['collection']['status']
     # Tornado-style coroutine return
     raise gen.Return(c)
Esempio n. 60
0
    def do_login(self, password):
        """Log in with *password*; on success store the response on
        ``self.user`` and return ``(True, response_dict)``.

        Raises:
            LoginFailed: when the service reports ``r != 1``.
        """
        lb64 = LongBase64()
        # Pipe-separated login record; the fixed fields look like
        # ip|port|flag|client-version - NOTE(review): confirm their meaning
        logininfo = '%s|%s|122.227.254.206|8988|1|4.0.1.2' % (self.username, password)
        logininfo = lb64.encodestring(logininfo)
        postdata = quote(logininfo)

        login_resp = self.s.post(self._login_uri, data={'logininfo': postdata})
        login_res = json.loads(login_resp.content)
        if login_res['r'] != 1:
            raise LoginFailed('Login Failed: %s' % login_res['err_msg'])
        self.user = login_res
        return True, login_res