Example #1
	def urlparamtodic(self, data):
		"""Split a URL query string into a dict of unquoted keys and values."""
		dic = {}
		for arg in data.split('&'):
			# partition() tolerates values containing '=' and pairs with no '=' at all
			key, _, val = arg.partition('=')
			dic[parse.unquote_plus(key)] = parse.unquote_plus(val)
		return dic
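
For instance (a standalone sketch, assuming the urllib import the method implies):

from urllib import parse

# What the method computes for a typical query string:
data = 'name=J%C3%BCrgen&q=hello+world'
result = {parse.unquote_plus(k): parse.unquote_plus(v)
          for k, _, v in (arg.partition('=') for arg in data.split('&'))}
print(result)  # {'name': 'Jürgen', 'q': 'hello world'}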
Example #2
def parse_userinfo(userinfo):
    """Validates the format of user information in a MongoDB URI.
    Reserved characters like ':', '/', '+' and '@' must be escaped
    following RFC 2396.

    Returns a 2-tuple containing the unescaped username followed
    by the unescaped password.

    :Parameters:
        - `userinfo`: A string of the form <username>:<password>

    .. versionchanged:: 2.2
       Now uses `urllib.unquote_plus` so `+` characters must be escaped.
    """
    if '@' in userinfo or userinfo.count(':') > 1:
        raise InvalidURI("':' or '@' characters in a username or password "
                         "must be escaped according to RFC 2396.")
    user, _, passwd = _partition(userinfo, ":")
    # No password is expected with GSSAPI authentication.
    if not user:
        raise InvalidURI("The empty string is not valid username.")
    user = unquote_plus(user)
    passwd = unquote_plus(passwd)

    return user, passwd
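
As the docstring says, reserved characters must be percent-escaped before being embedded in the URI. A minimal round-trip sketch, assuming Python 3's urllib.parse:

from urllib.parse import quote_plus, unquote_plus

passwd = 'p@ss:word+1'
userinfo = 'alice:' + quote_plus(passwd)   # 'alice:p%40ss%3Aword%2B1'
# parse_userinfo() now sees exactly one ':' and no '@',
# and unquote_plus() restores the original password:
assert unquote_plus(userinfo.split(':', 1)[1]) == passwd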
Example #3
def parse_userinfo(userinfo):
    """Validates the format of user information in a MongoDB URI.
    Reserved characters like ':', '/', '+' and '@' must be escaped
    following RFC 3986.

    Returns a 2-tuple containing the unescaped username followed
    by the unescaped password.

    :Parameters:
        - `userinfo`: A string of the form <username>:<password>

    .. versionchanged:: 2.2
       Now uses `urllib.unquote_plus` so `+` characters must be escaped.
    """
    if '@' in userinfo or userinfo.count(':') > 1:
        if PY3:
            quote_fn = "urllib.parse.quote_plus"
        else:
            quote_fn = "urllib.quote_plus"
        raise InvalidURI("Username and password must be escaped according to "
                         "RFC 3986, use %s()." % quote_fn)
    user, _, passwd = _partition(userinfo, ":")
    # No password is expected with GSSAPI authentication.
    if not user:
        raise InvalidURI("The empty string is not a valid username.")
    return unquote_plus(user), unquote_plus(passwd)
Example #4
File: asis.py Project: rohe/oidctest
def print_conf(c):
    iss, tag = c.split('][', 1)
    fname = os.path.join('entities', quote_plus(unquote_plus(iss)),
                         quote_plus(unquote_plus(tag)))
    with open(fname, 'r') as fp:
        cnf = json.loads(fp.read())
    print(">>>", fname)
    print(json.dumps(cnf, sort_keys=True, indent=2,
                     separators=(',', ': ')))
Example #5
def update_group_station_state(group, station):
    """
    Required by the android client.
    """
    group = unquote_plus(group)
    station = unquote_plus(station)
    try:
        form_score = request.json['form_score']
        station_score = request.json['score']
        station_state = request.json['state']
    except LookupError:
        return jsonify({'message': 'Missing value'}), 400

    loco.set_score(mdl.DB.session, group, station, station_score, form_score,
                   station_state)

    station_entity = mdl.Station.one(name=station)
    group_entity = mdl.Group.one(name=group)

    return jsonify(
        group_name=group,
        form_score=form_score,
        score=station_score,
        state=station_state,
        station_name=station,
        station_id=station_entity.id,
        group_id=group_entity.id)
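
Why the two unquote_plus() calls are needed: the group and station names arrive as URL path segments, where spaces and reserved characters are escaped. A quick standalone sketch:

from urllib.parse import quote_plus, unquote_plus

group = 'Team A & B'
segment = quote_plus(group)          # 'Team+A+%26+B', safe inside a URL
assert unquote_plus(segment) == group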
Example #6
File: server.py Project: nbeguier/cassh
def ldap_authentification(admin=False):
    """
    Return True if the user is properly authenticated.
        [email protected]
        password=xxxxx
    """
    if SERVER_OPTS['ldap']:
        credentials = data2map()
        if 'realname' in credentials:
            realname = unquote_plus(credentials['realname'])
        else:
            return False, 'Error: No realname option given.'
        if 'password' in credentials:
            password = unquote_plus(credentials['password'])
        else:
            return False, 'Error: No password option given.'
        if password == '':
            return False, 'Error: password is empty.'
        ldap_conn = initialize("ldap://"+SERVER_OPTS['ldap_host'])
        try:
            ldap_conn.bind_s(realname, password)
        except Exception as e:
            return False, 'Error: %s' % e
        if admin:
            memberof_admin_list = ldap_conn.search_s(
                SERVER_OPTS['ldap_bind_dn'],
                SCOPE_SUBTREE,
                filterstr='(&(%s=%s)(memberOf=%s))' % (
                    SERVER_OPTS['filterstr'],
                    realname,
                    SERVER_OPTS['ldap_admin_cn']))
            if not memberof_admin_list:
                return False, 'Error: user %s is not an admin.' % realname
    return True, 'OK'
Example #7
def get_magnet_info(uri):
    """Parse torrent information from magnet link.

    Args:
        uri (str): The magnet link.

    Returns:
        dict: Information about the magnet link.

        Format of the magnet dict::

            {
                "name": the torrent name,
                "info_hash": the torrents info_hash,
                "files_tree": empty value for magnet links
            }

    """

    tr0_param = 'tr.'
    tr0_param_regex = re.compile(r'^tr\.(\d+)=(\S+)')
    if not uri.startswith(MAGNET_SCHEME):
        return {}

    name = None
    info_hash = None
    trackers = {}
    tier = 0
    for param in uri[len(MAGNET_SCHEME):].split('&'):
        if param.startswith(XT_BTIH_PARAM):
            xt_hash = param[len(XT_BTIH_PARAM):]
            if len(xt_hash) == 32:
                try:
                    info_hash = base64.b32decode(xt_hash.upper()).encode('hex')
                except TypeError as ex:
                    log.debug('Invalid base32 magnet hash: %s, %s', xt_hash, ex)
                    break
            elif is_infohash(xt_hash):
                info_hash = xt_hash.lower()
            else:
                break
        elif param.startswith(DN_PARAM):
            name = unquote_plus(param[len(DN_PARAM):])
        elif param.startswith(TR_PARAM):
            tracker = unquote_plus(param[len(TR_PARAM):])
            trackers[tracker] = tier
            tier += 1
        elif param.startswith(tr0_param):
            try:
                tier, tracker = re.match(tr0_param_regex, param).groups()
                trackers[tracker] = tier
            except AttributeError:
                pass

    if info_hash:
        if not name:
            name = info_hash
        return {'name': name, 'info_hash': info_hash, 'files_tree': '', 'trackers': trackers}
    else:
        return {}
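
A usage sketch, assuming the module constants carry deluge's usual values (MAGNET_SCHEME = 'magnet:?', XT_BTIH_PARAM = 'xt=urn:btih:', DN_PARAM = 'dn=', TR_PARAM = 'tr='):

uri = ('magnet:?xt=urn:btih:' + 'c' * 40 +
       '&dn=My+Torrent&tr=udp%3A%2F%2Ftracker.example%3A6969')
info = get_magnet_info(uri)
# info would be roughly:
# {'name': 'My Torrent', 'info_hash': 'ccc...c', 'files_tree': '',
#  'trackers': {'udp://tracker.example:6969': 0}}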
Example #8
def name_file(imgUrl):
    fileName = unquote_plus(unquote_plus(unquote_plus(basename(imgUrl))))
    if os.path.exists(fileName):
        base, ext = os.path.splitext(fileName)
        nfiles = len(glob.glob(base+'*'+ext))
        fileName = base+'_'+str(nfiles)+ext
    return fileName
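
The triple unquote_plus() guards against file names that were percent-encoded more than once; each pass peels one layer. A standalone sketch:

from urllib.parse import unquote_plus

name = 'my%252Bphoto.jpg'      # 'my+photo.jpg', encoded twice
once = unquote_plus(name)      # 'my%2Bphoto.jpg'
twice = unquote_plus(once)     # 'my+photo.jpg'
thrice = unquote_plus(twice)   # 'my photo.jpg'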
Example #9
def article(outlet, pub_date, title):
    outlet_url = get_outlet_url(outlet)
    title = parse.unquote_plus(title)
    results = get_page_info(outlet_url, pub_date, title)
    masthead = parse.unquote_plus(outlet)
    return render_template('article.html',
                           masthead=masthead,
                           results=results)
Example #10
def queryparse(query):
	ret = dict()
	for kvpair in query.split("&"):
		try:
			(key, val) = kvpair.split("=")
			ret[unquote_plus(key)] = unquote_plus(val)
		except Exception: continue
	return ret
Example #11
 def test_safe_urlencode(self):
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({'test': 'Hello ☃! Helllo world!'}))),
         'test=Hello ☃! Helllo world!')
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({'test': ['Hello ☃!', 'Helllo world!']}, True))),
         "test=Hello \u2603!&test=Helllo world!")
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({'test': ('Hello ☃!', 'Helllo world!')}, True))),
         "test=Hello \u2603!&test=Helllo world!")
Example #12
File: waywire.py Project: saml/x
def fetch_embed_player(url):
    resp = fetch(url)
    html = unquote_plus(resp.text)
    cid = find_cid(html)
    if cid:
        # url was a redirect page, not actual video player.
        embed_url = '{}/embed/player/container/1920/922/?content={}&widget_type_cid=cvp'.format(base_url(url), cid)
        resp = fetch(embed_url)
        return unquote_plus(resp.text)
    return html
Example #13
 def post(self):
     results = list()
     logging.debug("Received POST request")
     for line in str(self.request.body, 'utf8').split('\n'):
         fields = line.split('\t')
         text = unquote_plus(unquote_plus(fields[0]))
         logging.debug("Classificating %s" % text)
         classification = self.default_classificator.classify(text)
         result = {"text":text, "topics":self.__get_concepts_from_classification(classification)}
         results.append(result)
     self.write({"response":results})
Example #14
File: escape.py Project: 1stvamp/tornado
    def url_unescape(value, encoding='utf-8'):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.
        """
        if encoding is None:
            return urllib_parse.unquote_plus(utf8(value))
        else:
            return unicode_type(urllib_parse.unquote_plus(utf8(value)), encoding)
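
Usage follows the docstring directly (a sketch):

url_unescape('Hello%2C+world%21')   # 'Hello, world!'
# With encoding=None the bytes are returned undecoded, per the docstring.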
Example #15
def parse(query_string, unquote=True, normalized=False, encoding=DEFAULT_ENCODING):
    """
    Main parse function
    @param query_string:
    @param unquote: unquote html query string ?
    @param encoding: An optional encoding used to decode the keys and values. Defaults to utf-8, which the W3C declares as a default in the W3C algorithm for encoding.
    @see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm

    @param normalized: parse number key in dict to proper list ?
    """

    mydict = {}
    plist = []
    if query_string == "":
        return mydict

    if type(query_string) == bytes:
        query_string = query_string.decode()

    for element in query_string.split("&"):
        try:
            if unquote:
                (var, val) = element.split("=")
                if sys.version_info[0] == 2:
                    var = var.encode("ascii")
                    val = val.encode("ascii")
                var = urllib.unquote_plus(var)
                val = urllib.unquote_plus(val)
            else:
                (var, val) = element.split("=")
        except ValueError:
            raise MalformedQueryStringError
        if encoding and sys.version_info[0] == 2:
            # only Python 2 byte strings need decoding; Python 3 str is already text
            var = var.decode(encoding)
            val = val.decode(encoding)
        plist.append(parser_helper(var, val))
    for di in plist:
        (k, v) = di.popitem()
        tempdict = mydict
        while k in tempdict and type(v) is dict:
            tempdict = tempdict[k]
            (k, v) = v.popitem()
        if k in tempdict and type(tempdict[k]).__name__ == "list":
            tempdict[k].append(v)
        elif k in tempdict:
            tempdict[k] = [tempdict[k], v]
        else:
            tempdict[k] = v

    if normalized == True:
        return _normalize(mydict)
    return mydict
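
A usage sketch, assuming parser_helper and _normalize from the same module behave as the docstring implies:

parse('a=hello+world&b=1')
# -> {'a': 'hello world', 'b': '1'}
parse('c[0]=x&c[1]=y', normalized=True)
# -> roughly {'c': ['x', 'y']}, numbered keys collapsed into a list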
Example #16
File: client.py Project: janurag/pysolr
 def test_safe_urlencode(self):
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({"test": "Hello ☃! Helllo world!"}))),
         "test=Hello ☃! Helllo world!",
     )
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({"test": ["Hello ☃!", "Helllo world!"]}, True))),
         "test=Hello \u2603!&test=Helllo world!",
     )
     self.assertEqual(
         force_unicode(unquote_plus(safe_urlencode({"test": ("Hello ☃!", "Helllo world!")}, True))),
         "test=Hello \u2603!&test=Helllo world!",
     )
Example #17
File: action.py Project: rohe/oidctest
    def _cp_dispatch(self, vpath):
        # Only get here if vpath != None
        ent = cherrypy.request.remote.ip
        logger.info('ent:{}, vpath: {}'.format(ent, vpath))

        if len(vpath):
            if len(vpath) == 2:
                cherrypy.request.params['iss'] = unquote_plus(vpath.pop(0))
                cherrypy.request.params['tag'] = unquote_plus(vpath.pop(0))
            cherrypy.request.params['ev'] = init_events(
                cherrypy.request.path_info)

            return self
Example #18
    def _parseBody(self):
        """Parses the body of the request into a dictionary. At the moment only 
        application/x-www-form-urlencoded is supported!
        """
        
        # If the content_type is defined and the content has a length try to parse the body
        if self.content_type and self.content_length:
            if self.content_type.startswith('application/x-www-form-urlencoded'):
                self.body = MultiDict()
                
                # Read the body from the virtual file
                body = self.environment["wsgi.input"].read(self.content_length)
                
                # Decode the body from its latin-1 decoding to a python string
                body = body.decode('latin-1')
                
                # Split the body into strings containing one key and one value
                pairs = body.split('&')
                
                # For each key value pair split it and decode it from urlencoded strings to a string
                for pair in pairs:
                    (key, _, value) = pair.partition('=')
                    
                    # Add key/value to MultiDict 
                    self.body.append(unquote_plus(key), unquote_plus(value))

            elif self.content_type.startswith("multipart/form-data"):
                self.body = cgi.FieldStorage(fp=self.environment["wsgi.input"], environ=self.environment)

            elif self.content_type.startswith("application/json"):
                if "charset" in self.content_type:
                    try:
                        charset = self.content_type[self.content_type.find("charset"):].rpartition("=")[2]
                    except:
                        charset = "UTF8"
                else:
                    charset = "UTF8"

                # Read the body from the virtual file
                body = self.environment["wsgi.input"].read(self.content_length)

                # Decode the body
                body = body.decode(charset)

                self.body = json.loads(body)
                
        elif self.content_length:
            self.body = self.environment["wsgi.input"].read(self.content_length)
        else:
            self.body = None
Example #19
File: formpaser.py Project: cymoo/minim
 def _url_decode_impl(pair_iter, charset, keep_blank_values, errors):
     for pair in pair_iter:
         if not pair:
             continue
         equal = b'='
         if equal in pair:
             key, value = pair.split(equal, 1)
         else:
             if not keep_blank_values:
                 continue
             key = pair
             value = b''
         yield unquote_plus(safe_str(key)), unquote_plus(safe_str(value),
                                                         charset, errors)
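
The generator consumes an iterable of raw byte pairs, e.g. (a sketch, assuming safe_str decodes bytes to text):

pairs = b'a=1&b=hello+world&flag'.split(b'&')
dict(_url_decode_impl(pairs, 'utf-8', keep_blank_values=True, errors='replace'))
# -> {'a': '1', 'b': 'hello world', 'flag': ''}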
Example #20
def task_break_view(task_id, task_date):
    form = TaskBreakForm(request.form)

    try:
        task = Task.query.filter(
            Task.id == task_id,
            Task.owner_id == current_user.id,
        ).one()
    except NoResultFound:
        # This is not an existing task this user owns
        flash(
            'Could not find task {id}.'.format(id=task_id),
            'danger',
        )
        return redirect(url_for('home'))

    date = parse.unquote_plus(task_date)
    if request.method == 'POST' and form.validate():
        return try_to_take_task_break(
            task_id=task_id,
            form=form,
            date=date,
        )
    else:
        return render_turkey(
            "task_break.html",
            form=form,
            task=task,
            task_date=date,
        )
Example #21
def _parse_options(opts, delim):
    """Helper method for split_options which creates the options dict.
    Also handles the creation of a list for the URI tag_sets/
    readpreferencetags portion."""
    options = {}
    for opt in opts.split(delim):
        key, val = opt.split("=")
        if key.lower() == 'readpreferencetags':
            options.setdefault('readpreferencetags', []).append(val)
        else:
            # str(option) to ensure that a unicode URI results in plain 'str'
            # option names. 'normalized' is then suitable to be passed as
            # kwargs in all Python versions.
            if str(key) in options:
                warnings.warn("Duplicate URI option %s" % (str(key),))
            options[str(key)] = unquote_plus(val)

    # Special case for deprecated options
    if "wtimeout" in options:
        if "wtimeoutMS" in options:
            options.pop("wtimeout")
        warnings.warn("Option wtimeout is deprecated, use 'wtimeoutMS'"
                      " instead")

    return options
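
For example (a sketch; splitting the URI into the options string happens elsewhere in the parser):

_parse_options('w=majority&appname=my+app', '&')
# -> {'w': 'majority', 'appname': 'my app'}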
Example #22
File: service.py Project: pubbothq/pubbot
    def _recv_loop(self):
        buf = b''
        while True:
            try:
                data = self._socket.recv(512)
            except greenlet.GreenletExit:
                raise
            except Exception:
                eventlet.spawn(self.reconnect)
                continue  # don't fall through with stale or undefined data

            buf += data
            pos = buf.find(b"\n")
            while pos >= 0:
                line = unquote_plus(force_str(buf[0:pos]))
                parts = line.split(' ')

                if len(parts) >= 3:
                    parts = line.split(' ', 2)
                    parts.pop(0)
                    self._recv_queue.put(parts)
                else:
                    # print len(parts), line
                    pass

                buf = buf[pos + 1:]
                pos = buf.find(b"\n")
Example #23
    def __init__(self, *args, **kwargs):
        # options for kwargs are (url)|(folderName, path)

        if 'url' in kwargs and len(kwargs) == 1:
            match = folderAndPathRegex.match(kwargs['url'])
            folderName = match.groups()[0]
            self.folder = Folder.objects.get(name=folderName)
            self.relativePath = unquote_plus(match.groups()[2] or '')
            self.absPath = os.path.join(self.folder.localPath, self.relativePath)
            self.url = kwargs['url']
        elif 'path' in kwargs and len(kwargs) == 2:
            if 'folderName' in kwargs:
                self.folder = Folder.objects.get(name=kwargs['folderName'])
            elif 'folder' in kwargs:
                self.folder = kwargs['folder']
            else:
                raise FolderAndPathArgumentException(**kwargs)

            # just in case the path argument already has the folder localpath appended, try to replace the folder.localpath prefix
            self.relativePath = re.sub(r'^%s' % self.folder.localPath, '', kwargs['path'])

            self.absPath = os.path.join(self.folder.localPath, self.relativePath)
            self.url = "%s/%s" % (self.folder.name, self.relativePath)
        else:
            raise FolderAndPathArgumentException(**kwargs)
Example #24
def _parse_rfc1738_args(name):
    # Modified to permit dots in the engine name.
    pattern = re.compile(r'''
            (?P<name>[\w\.\+]+)://
            (?:
                (?P<username>[^:/]*)
                (?::(?P<password>[^/]*))?
            @)?
            (?:
                (?P<host>[^/:]*)
                (?::(?P<port>[^/]*))?
            )?
            (?:/(?P<database>.*))?
            ''', re.X)

    m = pattern.match(name)
    if m is not None:
        components = m.groupdict()
        if components['database'] is not None:
            tokens = components['database'].split('?', 2)
            components['database'] = tokens[0]
            query = (len(tokens) > 1 and dict(parse_qsl(tokens[1]))) or None
        else:
            query = None
        components['query'] = query

        if components['password'] is not None:
            components['password'] = unquote_plus(components['password'])

        return components
    else:
        raise ValueError(
            "Could not parse rfc1738 URL from string '%s'" % name)
Example #25
File: task.py Project: geokala/quizify
def complete_old_task_view(task_id, task_date):
    form = CompleteOldTaskForm(request.form)

    try:
        task = Task.query.filter(Task.id == task_id, Task.owner_id == current_user.id).one()
    except NoResultFound:
        # This is not an existing task this user owns
        flash("Could not find task {id}.".format(id=task_id), "danger")
        return redirect(url_for("home"))

    date = parse.unquote_plus(task_date)

    creation_day = datetime.datetime.combine(task.creation_time, datetime.datetime.min.time())
    completion_day = datetime.datetime.strptime(date, "%Y %b %d")
    if completion_day < creation_day:
        flash(
            "Could not complete an old copy of {task_name} "
            "from before its creation date {creation}".format(task_name=task.name, creation=creation_day),
            "danger",
        )
        return redirect(url_for("task_history", task_id=task_id))
    if request.method == "POST" and form.validate():
        return try_to_complete_task(task_id=task_id, form=form, date=date)
    else:
        return render_turkey("complete_old_task.html", form=form, task=task, task_date=date)
Example #26
 def do_POST(self):
     length = int(self.headers.get("Content-length", 0))
     if length:
         data = self.rfile.read(length)
         data = unquote_plus(data.decode("utf8"))
         self.data = data
     self.do_REQUEST()
Example #27
        def split_text(text):
            start = 0
            text = quote_plus(text)
            length = len(text)
            while (length - start) > self._MAX_LENGTH_PER_QUERY:
                for seperator in self._SEPERATORS:
                    index = text.rfind(seperator, start, start+self._MAX_LENGTH_PER_QUERY)
                    if index != -1:
                        break
                else:
                    raise Error('input too large')
                end = index + len(seperator)
                yield unquote_plus(text[start:end])
                start = end

            yield unquote_plus(text[start:])
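
The trick is to measure and split the percent-encoded text (whose length is what the query limit constrains) while yielding decoded chunks that re-join to the original. A standalone sketch of the same idea, with hypothetical stand-ins for the class attributes; note the separators must be given in their encoded form, e.g. '+' for a space:

from urllib.parse import quote_plus, unquote_plus

MAX_LEN, SEPARATORS = 40, ('.', '+')

def split_text(text):
    start, text = 0, quote_plus(text)
    while len(text) - start > MAX_LEN:
        for sep in SEPARATORS:
            index = text.rfind(sep, start, start + MAX_LEN)
            if index != -1:
                break
        else:
            raise ValueError('input too large')
        end = index + len(sep)
        yield unquote_plus(text[start:end])
        start = end
    yield unquote_plus(text[start:])

sample = 'one two three. four five six seven eight nine ten.'
assert ''.join(split_text(sample)) == sample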
Example #28
 def decode(self, raw, key):
     """
     Extract the envelope XML from its wrapping.
     """
     # It has already been URL-decoded once by Flask
     xml = unquote_plus(raw)
     return self.process_salmon_envelope(xml, key)
Example #29
def message_wall_app(environ, start_response):
    output = StringIO()
    status = b'200 OK' # HTTP Status
    headers = [(b'Content-type', b'text/html; charset=utf-8')]
    start_response(status, headers)
    print("<h1>Message Wall</h1>",file=output)
    if environ['REQUEST_METHOD'] == 'POST':
        size = int(environ['CONTENT_LENGTH'])
        post_str = unquote_plus(environ['wsgi.input'].read(size).decode())
        form_vals = get_form_vals(post_str)
        form_vals['timestamp'] = datetime.datetime.now()
        cursor.execute("""insert into messages (user, message, ts) values
                       (:user,:message,:timestamp)""", form_vals) 
    path_vals = environ['PATH_INFO'][1:].split("/")
    user, *tag = path_vals
    cursor.execute("""select * from messages where user like ? or message
                    like ? order by ts""", (user,"@"+user+"%"))
    print(message_table(cursor.fetchall()), "<p>", file=output)
    
    print('<form method="POST">User: <input type="text" '
          'name="user">Message: <input type="text" '
          'name="message"><input type="submit" value="Send"></form>', 
           file=output)
    # The returned object is going to be printed
    return [html_page(output.getvalue())]
Example #30
    def _fetch_basic(self, info_url=None):
        """ Fetch info url page and set member vars. """
        allinfo = get_video_info(self.videoid, newurl=info_url)
        new.callback("Fetched video info")

        def _get_lst(key, default="unknown", dic=allinfo):
            """ Dict get function, returns first index. """
            retval = dic.get(key, default)
            return retval[0] if retval != default else default

        self._title = _get_lst('title')
        self._dashurl = _get_lst('dashmpd')
        self._author = _get_lst('author')
        self._videoid = _get_lst('video_id')
        self._rating = float(_get_lst('avg_rating', 0.0))
        self._length = int(_get_lst('length_seconds', 0))
        self._viewcount = int(_get_lst('view_count', 0))
        self._thumb = unquote_plus(_get_lst('thumbnail_url', ""))
        self._formats = [x.split("/") for x in _get_lst('fmt_list').split(",")]
        self._keywords = _get_lst('keywords', "").split(',')
        self._bigthumb = _get_lst('iurlsd', "")
        self._bigthumbhd = _get_lst('iurlsdmaxres', "")
        self.ciphertag = _get_lst("use_cipher_signature") == "True"
        self.sm = _extract_smap(g.UEFSM, allinfo, True)
        self.asm = _extract_smap(g.AF, allinfo, True)
        dbg("extracted stream maps")
Example #31
 def urlDecode(self, s):
     return unquote_plus(s)
Example #32
async def url_download(message: Message, url: str) -> Tuple[str, int]:
    """ download from link """
    pattern = r"^(?:(?:https|tg):\/\/)?(?:www\.)?(?:t\.me\/|openmessage\?)(?:(?:c\/(\d+))|(\w+)|(?:user_id\=(\d+)))(?:\/|&message_id\=)(\d+)(\?single)?$"  # noqa
    # group 1: private supergroup id, group 2: chat username,
    # group 3: private group/chat id, group 4: message id
    # group 5: check for download single media from media group
    match = re.search(pattern, url.split('|', 1)[0].strip())
    if match:
        chat_id = None
        msg_id = int(match.group(4))
        if match.group(1):
            chat_id = int("-100" + match.group(1))
        elif match.group(2):
            chat_id = match.group(2)
        elif match.group(3):
            chat_id = int(match.group(3))
        if chat_id and msg_id:
            resource = await message.client.get_messages(chat_id, msg_id)
            if resource.media_group_id and not bool(match.group(5)):
                output = await handle_download(message, resource, True)
            elif resource.media:
                output = await tg_download(message, resource, True)
            else:
                raise Exception("given tg link doesn't have any media")
            return output
        raise Exception("invalid telegram message link!")
    await message.edit("`Downloading From URL...`")
    start_t = datetime.now()
    custom_file_name = unquote_plus(os.path.basename(url))
    if "|" in url:
        url, c_file_name = url.split("|", maxsplit=1)
        url = url.strip()
        if c_file_name:
            custom_file_name = c_file_name.strip()
    dl_loc = os.path.join(config.Dynamic.DOWN_PATH, custom_file_name)
    downloader = SmartDL(url, dl_loc, progress_bar=False)
    downloader.start(blocking=False)
    with message.cancel_callback(downloader.stop):
        while not downloader.isFinished():
            total_length = downloader.filesize if downloader.filesize else 0
            downloaded = downloader.get_dl_size()
            percentage = downloader.get_progress() * 100
            speed = downloader.get_speed(human=True)
            estimated_total_time = downloader.get_eta(human=True)
            progress_str = \
                "__{}__\n" + \
                "```[{}{}]```\n" + \
                "**Progress** : `{}%`\n" + \
                "**URL** : `{}`\n" + \
                "**FILENAME** : `{}`\n" + \
                "**Completed** : `{}`\n" + \
                "**Total** : `{}`\n" + \
                "**Speed** : `{}`\n" + \
                "**ETA** : `{}`"
            progress_str = progress_str.format(
                "trying to download", ''.join(
                    (config.FINISHED_PROGRESS_STR
                     for _ in range(math.floor(percentage / 5)))), ''.join(
                         (config.UNFINISHED_PROGRESS_STR
                          for _ in range(20 - math.floor(percentage / 5)))),
                round(percentage, 2), url, custom_file_name,
                humanbytes(downloaded), humanbytes(total_length), speed,
                estimated_total_time)
            await message.edit(progress_str, disable_web_page_preview=True)
            await asyncio.sleep(config.Dynamic.EDIT_SLEEP_TIMEOUT)
    if message.process_is_canceled:
        raise ProcessCanceled
    return dl_loc, (datetime.now() - start_t).seconds
Example #33
File: index.py Project: quiltdata/quilt
def handler(event, context):
    """enumerate S3 keys in event, extract relevant data, queue events, send to
    elastic via bulk() API
    """
    logger_ = get_quilt_logger()
    # message is a proper SQS message, which either contains a single event
    # (from the bucket notification system) or batch-many events as determined
    # by enterprise/**/bulk_loader.py
    # An exception that we'll want to re-raise after the batch sends
    content_exception = None
    batch_processor = DocumentQueue(context)
    s3_client = make_s3_client()
    for message in event["Records"]:
        body = json.loads(message["body"])
        body_message = json.loads(body["Message"])
        if "Records" not in body_message:
            # could be TEST_EVENT, or another unexpected event; skip it
            logger_.error("No 'Records' key in message['body']: %s", message)
            continue
        events = body_message["Records"]
        # event is a single S3 event
        for event_ in events:
            validated = shape_event(event_)
            if not validated:
                logger_.debug("Skipping invalid event %s", event_)
                continue
            event_ = validated
            logger_.debug("Processing %s", event_)
            try:
                event_name = event_["eventName"]
                # Process all Create:* and Remove:* events
                if not any(event_name.startswith(n) for n in EVENT_PREFIX.values()):
                    logger_.warning("Skipping unknown event type: %s", event_name)
                    continue
                bucket = event_["s3"]["bucket"]["name"]
                # In the grand tradition of IE6, S3 events turn spaces into '+'
                # TODO: check if eventbridge events do the same thing with +
                key = unquote_plus(event_["s3"]["object"]["key"])
                version_id = event_["s3"]["object"].get("versionId", None)
                # ObjectRemoved:Delete does not include "eTag"
                etag = event_["s3"]["object"].get("eTag", "")
                # synthetic events from bulk scanner might define lastModified
                last_modified = (
                    event_["s3"]["object"].get("lastModified") or event_["eventTime"]
                )
                # Get two levels of extensions to handle files like .csv.gz
                path = pathlib.PurePosixPath(key)
                ext1 = path.suffix
                ext2 = path.with_suffix('').suffix
                ext = (ext2 + ext1).lower()
                # Handle delete and deletemarker first and then continue so that
                # head_object and get_object (below) don't fail
                if event_name.startswith(EVENT_PREFIX["Removed"]):
                    do_index(
                        s3_client,
                        batch_processor,
                        event_name,
                        bucket=bucket,
                        etag=etag,
                        ext=ext,
                        key=key,
                        last_modified=last_modified,
                        version_id=version_id
                    )
                    continue
                try:
                    head = retry_s3(
                        "head",
                        bucket,
                        key,
                        s3_client=s3_client,
                        version_id=version_id,
                        etag=etag
                    )
                except botocore.exceptions.ClientError as first:
                    logger_.warning("head_object error: %s", first)
                    # "null" version sometimes results in 403s for buckets
                    # that have changed versioning, retry without it
                    if (first.response.get('Error', {}).get('Code') == "403"
                            and version_id == "null"):
                        try:
                            head = retry_s3(
                                "head",
                                bucket,
                                key,
                                s3_client=s3_client,
                                version_id=None,
                                etag=etag
                            )
                        except botocore.exceptions.ClientError as second:
                            # this will bypass the DLQ but that's the right thing to do
                            # as some listed objects may NEVER succeed head requests
                            # (e.g. foreign owner) and there's no reason to torpedo
                            # the whole batch (which might include good files)
                            logger_.warning("Retried head_object error: %s", second)
                            logger_.error("Fatal head_object, skipping event: %s", event_)
                            continue
                    else:
                        logger_.error("Fatal head_object, skipping event: %s", event_)
                        continue
                # backfill fields based on the head_object
                size = head["ContentLength"]
                last_modified = last_modified or head["LastModified"].isoformat()
                etag = head.get("etag") or etag
                version_id = head.get("VersionId") or version_id
                try:
                    text = maybe_get_contents(
                        bucket,
                        key,
                        ext,
                        etag=etag,
                        version_id=version_id,
                        s3_client=s3_client,
                        size=size
                    )
                # we still want an entry for this document in elastic so that, e.g.,
                # the file counts from elastic are correct
                # these exceptions can happen for a variety of reasons (e.g. glacier
                # storage class, index event arrives after delete has occurred, etc.)
                # given how common they are, we shouldn't fail the batch for this
                except Exception as exc:  # pylint: disable=broad-except
                    text = ""
                    logger_.warning("Content extraction failed %s %s %s", bucket, key, exc)

                do_index(
                    s3_client,
                    batch_processor,
                    event_name,
                    bucket=bucket,
                    etag=etag,
                    ext=ext,
                    key=key,
                    last_modified=last_modified,
                    size=size,
                    text=text,
                    version_id=version_id
                )

            except botocore.exceptions.ClientError as boto_exc:
                if not should_retry_exception(boto_exc):
                    logger_.warning("Skipping non-fatal exception: %s", boto_exc)
                    continue
                logger_.critical("Failed record: %s, %s", event, boto_exc)
                raise boto_exc
    # flush the queue
    batch_processor.send_all()
Example #34
 def make_key(self, *args):
     return ']['.join([unquote_plus(v) for v in args])
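
This appears to be the inverse of the c.split('][', 1) seen in Example #4: components are unquoted and joined with ']['. A standalone sketch:

from urllib.parse import unquote_plus

parts = ('https%3A%2F%2Fissuer.example', 'config+tag')
key = ']['.join(unquote_plus(v) for v in parts)
# -> 'https://issuer.example][config tag'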
Example #35
File: index.py Project: realgam3/bugler
def report(user_id):
    user = mongo.db.users.find_one_or_404({"id": user_id})
    website = urlparse(user.get("website") or "")
    website_protocol = website.scheme
    website_hostname = unquote_plus(website.hostname or "")
    reported = False
    if website_protocol and website_protocol in ["http", "https"] \
            and website_hostname and urlparse(CHALLENGE_URL).hostname != website_hostname:
        reported = True
        try:
            connection = pika.BlockingConnection(
                pika.ConnectionParameters(host=RABITMQ_HOST,
                                          port=RABITMQ_PORT))
            channel = connection.channel()
            channel.queue_declare(queue='browser')
            channel.basic_publish(
                exchange='',
                routing_key='browser',
                body=json.dumps({
                    "actions": [
                        # Open User URL
                        {
                            "action":
                            "page.goto",
                            "args": [
                                user["website"], {
                                    'timeout': 3000,
                                    "waitUntil": 'domcontentloaded'
                                }
                            ]
                        },
                        # Wait 3 Seconds
                        {
                            "action": "page.waitFor",
                            "args": [3000]
                        },
                        # Close all pages
                        {
                            "action": "context.closePages",
                            "args": []
                        },
                        # Login
                        {
                            "action":
                            "page.goto",
                            "args": [
                                urljoin(CHALLENGE_URL, "/login"), {
                                    'timeout': 3000,
                                    "waitUntil": 'domcontentloaded'
                                }
                            ]
                        },
                        {
                            "action": "page.type",
                            "args": ["#exampleInputUsername", "admin"]
                        },
                        {
                            "action":
                            "page.type",
                            "args": [
                                "#exampleInputPassword",
                                "BSidesTLV2020{S3rv1ce_W0rk3rs@Y0urS3rvic3}"
                            ]
                        },
                        {
                            "action": "page.click",
                            "args": ["#signin"]
                        },
                        {
                            "action": "page.waitFor",
                            "args": [1000]
                        },
                    ]
                }))
            connection.close()
        except Exception as ex:
            reported = False
            print(ex)

    return jsonify({"reported": reported})
Example #36
def register_view():
    result = request.args.get('result', '')
    result = unquote_plus(result)

    return render_template('register.html', result=result)
Example #37
def main():
    description = __doc__
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("input_files",
                        nargs="+",
                        default=[],
                        metavar="INFILE",
                        help="List of hit tables to process")
    parser.add_argument("-o",
                        "--outfile",
                        dest="output_file",
                        metavar="OUTFILE",
                        help="Write count table to OUTFILE")
    parser.add_argument("-l",
                        "--level",
                        dest="levels",
                        default=None,
                        metavar="LEVEL",
                        action="append",
                        help=""" Level(s) to collect counts on. Use flag
                      multiple times to specify multiple levels. If multiple
                      values given, one table produced for each with rank
                      name appended to file name. Levels can be an integer
                      (1-3) for KEGG or SEED levels, any one of 'gene',
                      'role', 'family',
                      'ko', or 'ortholog' (which are all synonyms), or
                      anything not synonymous with 'gene' to
                      get CAZy groups. Defaults to ortholog/role and
                      levels 1, 2, and 3 for KEGG and SEED
                      and gene and group for CAZy and COG.""")

    # option for deconvoluting clusters or assemblies
    add_weight_arguments(parser, multiple=True)

    # cutoff options
    add_count_arguments(parser)

    # format, ortholog heirarchy, and more
    kegg.add_path_arguments(
        parser,
        defaults={'countMethod': 'tophit'},
        choices={
            'countMethod': ('tophit', 'first', 'most', 'all', 'consensus')
        },
        helps={
            'countMethod':
            ("How to deal with counts from multiple hits. ('first': "
             "just use the first hit, 'most': "
             "can return multiple hits, 'all': return every hit, "
             "consensus: return None unless all the same). Do not "
             "use most or consensus with more than one level at a time. "
             "Default is 'tophit': This breaks any ties by choosing "
             "the most abundant hit based on other unambiguous "
             "assignments.")
        })

    # log level and help
    add_universal_arguments(parser)
    arguments = parser.parse_args()
    setup_logging(arguments)

    if len(arguments.input_files) == 0:
        parser.error("Must supply at least one m8 file to parse")

    # Set defaults and check for some conflicts
    if arguments.levels is None and arguments.heirarchyFile is None:
        # using hit names only
        arguments.levels = [None]
    else:
        if arguments.heirarchyFile is None \
                and arguments.heirarchyType != 'cazy':
            logging.warning("Type: %s", arguments.heirarchyType)
            parser.error("Cannot select levels without a heirarchy (ko) file")
        if arguments.levels is None:
            # set a default
            if arguments.heirarchyType == 'kegg':
                arguments.levels = ['ko', '1', '2', 'pathway']
            elif arguments.heirarchyType == 'seed':
                arguments.levels = ['role', '1', '2', 'subsystem']
            else:
                arguments.levels = ['gene', 'group']

        try:
            # Make sure the rank lists make sense
            arguments.levels = cleanLevels(arguments.levels)
        except Exception as e:
            parser.error(str(e))

    # load weights file
    sequenceWeights = loadSequenceWeights(arguments.weights)

    # only print to stdout if there is a single level
    if len(arguments.levels) > 1 and arguments.output_file is None:
        parser.error("STDOUT only works if a single level is chosen!")

    cutoff = arguments.cutoff

    # map reads to hits
    if arguments.mapFile is not None:
        if arguments.mapStyle == 'auto':
            with open(arguments.mapFile) as f:
                firstLine = next(f)
                while len(firstLine) == 0 or firstLine[0] == '#':
                    firstLine = next(f)
            if koMapRE.search(firstLine):
                arguments.mapStyle = 'kegg'
            elif seedMapRE.search(firstLine):
                arguments.mapStyle = 'seed'
            elif tabMapRE.search(firstLine):
                arguments.mapStyle = 'tab'
            # elif cogMapRE.search(firstLine):
            #    arguments.mapStyle='cog'
            else:
                raise Exception(
                    "Cannot figure out map type from first line:\n%s" %
                    (firstLine))

        logging.info("Map file seems to be: %s", arguments.mapStyle)
        if arguments.mapStyle == 'kegg':
            valueMap = kegg.parseLinkFile(arguments.mapFile)
        elif arguments.mapStyle == 'seed':
            valueMap = kegg.parseSeedMap(arguments.mapFile)
        # elif arguments.mapStyle=='cog':
        #    valueMap=kegg.parseCogMap(arguments.mapFile)
        else:
            if arguments.parseStyle == GIS:
                keyType = int
            else:
                keyType = None
            valueMap = parseMapFile(arguments.mapFile,
                                    valueType=None,
                                    valueDelim=arguments.tab_map_delim,
                                    keyType=keyType)
        if len(valueMap) > 0:
            logging.info("Read %d items into map. EG: %s", len(valueMap),
                         next(iter(valueMap.items())))
        else:
            logging.warning("Read 0 items into value map!")
    else:
        valueMap = None

    # parse input files
    fileCounts = {}
    totals = {}
    fileLabels = {}
    sortedLabels = []

    # Allow for file names to be preceded with TAG=
    for filename in arguments.input_files:
        bits = filename.split("=", 1)
        if len(bits) > 1:
            (filetag, filename) = bits
        else:
            filetag = filename
        fileLabels[filename] = filetag
        # keep order so that column order matches arguments
        sortedLabels.append(filetag)
        fileCounts[filetag] = {}
        totals[filetag] = 0

    # TODO: incorporate weights into tophit algorithm!
    if arguments.countMethod == 'tophit':
        # Process all files at once and use overall abundance to pick best hits
        from edl import redistribute
        params = FilterParams.create_from_arguments(arguments)
        multifile = redistribute.multipleFileWrapper(fileLabels.items())

        # don't give any hit translation, just use hit ids for redistribution
        readHits = redistribute.pickBestHitByAbundance(
            multifile,
            filterParams=params,
            returnLines=False,
            winnerTakeAll=True,
            parseStyle=arguments.parseStyle,
            sequenceWeights=sequenceWeights)
        # define method to turn Hits into Genes (kos, families)
        hitTranslator = getHitTranslator(parseStyle=arguments.parseStyle,
                                         hitStringMap=valueMap)
        # translateHit = lambda hit: hitTranslator.translateHit(hit)[0]

        # use read->file mapping and hit translator to get file based counts
        #  from returned (read,Hit) pairs
        increment = 1
        for (read_name, hit) in readHits:
            file_tag, read_name = read_name.split("/", 1)
            file_tag = unquote_plus(file_tag)
            gene = hitTranslator.translateHit(hit)[0]
            if gene is None:
                gene = "None"
            logging.debug("READ: %s\t%s\t%s\t%s", file_tag, read_name, hit.hit,
                          gene)
            genecount = fileCounts[file_tag].setdefault(gene, 0)
            if sequenceWeights is not None:
                increment = sequenceWeights.get(read_name, 1)
            fileCounts[file_tag][gene] = genecount + increment
            totals[file_tag] += increment
        logging.debug(str(totals))

    else:
        # Original way, just process each file separately
        for (filename, filetag) in fileLabels.items():
            infile = open(filename, 'rU')

            hitIter = parseM8FileIter(infile,
                                      valueMap,
                                      arguments.hitTableFormat,
                                      arguments.filter_top_pct,
                                      arguments.parseStyle,
                                      arguments.countMethod,
                                      ignoreEmptyHits=arguments.mappedHitsOnly)

            (total, counts, hitMap) = \
                countIterHits(hitIter,
                              allMethod=arguments.allMethod,
                              weights=sequenceWeights)
            fileCounts[filetag] = counts
            totals[filetag] = total

            logging.info("parsed %d hits (%d unique) for %d reads from %s",
                         total, len(counts), len(hitMap), filename)

            infile.close()

    logging.debug(repr(fileCounts))
    printCountTablesByLevel(fileCounts, totals, sortedLabels, arguments)
Example #38
File: views.py Project: mediafactory/yats
def show_board(request, name):
    # http://bootsnipp.com/snippets/featured/kanban-board
    """
        board structure

        [
            {
                'column': 'closed',
                'query': {'closed': False},
                'limit': 10,
                'extra_filter': 1, # 1 = days since closed, 2 = days since created, 3 = days since last changed, 4 days since last action
                'days': 1, # days
                'order_by': 'id',
                'order_dir': ''
            }
        ]
    """

    name = unquote_plus(name)

    if request.method == 'POST':
        if 'method' in request.POST:
            board = boards.objects.get(active_record=True,
                                       pk=request.POST['board'],
                                       c_user=request.user)
            try:
                columns = json.loads(board.columns)
            except:
                columns = []

            if request.POST['method'] == 'add':
                form = AddToBordForm(request.POST)
                if form.is_valid():
                    cd = form.cleaned_data
                    col = {
                        'column': cd['column'],
                        'query': request.session['last_search'],
                        'limit': cd['limit'],
                        'order_by': cd['order_by'],
                        'order_dir': cd['order_dir']
                    }
                    if cd.get('extra_filter') and cd.get('days'):
                        col['extra_filter'] = cd['extra_filter']
                        col['days'] = cd['days']
                    columns.append(col)
                    board.columns = json.dumps(columns, cls=DjangoJSONEncoder)
                    board.save(user=request.user)

                else:
                    err_list = []
                    for field in form:
                        for err in field.errors:
                            err_list.append('%s: %s' % (field.name, err))
                    messages.add_message(
                        request, messages.ERROR,
                        _('data invalid: %s') % '\n'.join(err_list))

                return HttpResponseRedirect('/board/%s/' %
                                            urlquote_plus(board.name))

        else:
            if request.POST['boardname'].strip() != '':
                if boards.objects.filter(active_record=True,
                                         c_user=request.user,
                                         name=request.POST['boardname']).count(
                                         ) == 0 and request.POST['boardname']:
                    board = boards()
                    board.name = request.POST['boardname'].strip()
                    board.save(user=request.user)

                    return HttpResponseRedirect(
                        '/board/%s/' %
                        urlquote_plus(request.POST['boardname']))

                else:
                    messages.add_message(
                        request, messages.ERROR,
                        _(u'A board with the name "%s" already exists' %
                          request.POST['boardname']))
                    return HttpResponseRedirect('/')
            else:
                messages.add_message(request, messages.ERROR,
                                     _(u'No name for a board given'))
                return HttpResponseRedirect('/')

    else:
        board = boards.objects.get(active_record=True,
                                   name=name,
                                   c_user=request.user)
        try:
            columns = json.loads(board.columns)
        except:
            columns = []

        if 'method' in request.GET and request.GET['method'] == 'del':
            new_columns = []
            for col in columns:
                if col['column'] != request.GET['column']:
                    new_columns.append(col)
            board.columns = json.dumps(new_columns, cls=DjangoJSONEncoder)
            board.save(user=request.user)

            return HttpResponseRedirect('/board/%s/' % urlquote_plus(name))

        elif 'method' in request.GET and request.GET['method'] == 'delete':
            board.delete(user=request.user)
            return HttpResponseRedirect('/')

    for column in columns:
        query = get_ticket_model().objects.select_related(
            'type', 'state', 'assigned', 'priority', 'customer').all()
        search_params, query = build_ticket_search_ext(request, query,
                                                       column['query'])
        column['query'] = query.order_by(
            '%s%s' %
            (column.get('order_dir', ''), column.get('order_by', 'id')))
        if 'extra_filter' in column and 'days' in column and column[
                'extra_filter'] and column['days']:
            if column['extra_filter'] == '1':  # days since closed
                column['query'] = column['query'].filter(
                    close_date__gte=datetime.date.today() -
                    datetime.timedelta(days=column['days'])).exclude(
                        close_date=None)
            if column['extra_filter'] == '2':  # days since created
                column['query'] = column['query'].filter(
                    c_date__gte=datetime.date.today() -
                    datetime.timedelta(days=column['days']))
            if column['extra_filter'] == '3':  # days since last changed
                column['query'] = column['query'].filter(
                    u_date__gte=datetime.date.today() -
                    datetime.timedelta(days=column['days']))
            if column['extra_filter'] == '4':  # days since last action
                column['query'] = column['query'].filter(
                    last_action_date__gte=datetime.date.today() -
                    datetime.timedelta(days=column['days']))
        if not request.user.is_staff:
            column['query'] = column['query'].filter(
                customer=request.organisation)

        seen_elements = {}
        seen = tickets_participants.objects.filter(
            user=request.user,
            ticket__in=column['query'].values_list('id',
                                                   flat=True)).values_list(
                                                       'ticket_id', 'seen')
        for see in seen:
            seen_elements[see[0]] = see[1]

        seen = tickets_ignorants.objects.filter(
            user=request.user,
            ticket__in=column['query'].values_list(
                'id', flat=True)).values_list('ticket_id')
        for see in seen:
            seen_elements[see[0]] = True

        if column['limit']:
            column['query'] = column['query'][:column['limit']]
        column['seen'] = seen_elements

    add_breadcrumbs(request, board.pk, '$')
    return render(request, 'board/view.html', {
        'columns': columns,
        'board': board
    })
Example #39
def stripUnquoteURL(url):
    if url.startswith('image://'):
        url = urllib.unquote_plus(url.replace('image://', '').strip('/'))
    else:
        url = urllib.unquote_plus(url.strip('/'))
    return url
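
Kodi-style image:// URLs wrap a percent-encoded URL, so for example (a sketch, using the Python 2 urllib the snippet relies on):

stripUnquoteURL('image://http%3a%2f%2fhost%2fposter.jpg/')
# -> 'http://host/poster.jpg'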
Example #40
    def __getitem__(self, item):
        if "%" in item:
            item = unquote_plus(item)

        return self._db[item]
Example #41
    def __setitem__(self, key, value):
        if '%' in key:
            key = unquote_plus(key)

        self._db[key] = value
        self.dump()
Example #42
File: utils.py Project: glebgav/WebCrawler
 def format_file_name_to_url(file_name: str):
     # rstrip('.txt') would strip any trailing '.', 't', 'x' characters;
     # removesuffix (Python 3.9+) removes the exact extension instead
     return unquote_plus(file_name).removesuffix(".txt")
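
Note the str.rstrip() pitfall here: rstrip() takes a set of characters, not a suffix, so it can eat into the name itself, which is why removesuffix() is the safe choice:

'text.txt'.rstrip('.txt')        # -> 'te'  (strips any trailing '.', 't', 'x')
'text.txt'.removesuffix('.txt')  # -> 'text'  (Python 3.9+)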
Example #43
File: views.py Project: sfrost/pgeu-system
def paypal_return_handler(request, methodid):
    tx = 'UNKNOWN'

    method = get_object_or_404(InvoicePaymentMethod,
                               pk=int(methodid),
                               active=True)
    pm = method.get_implementation()

    # Custom error return that can get to the request context
    def paypal_error(reason):
        return render(request, 'paypal/error.html', {
            'reason': reason,
        })

    # Logger for the invoice processing - we store it in the general
    # paypal logs
    def payment_logger(msg):
        ErrorLog(
            timestamp=timezone.now(),
            sent=False,
            message='Paypal automatch for %s: %s' % (tx, msg),
            paymentmethod=method,
        ).save()

    # Now for the main handler

    # Handle a paypal PDT return
    if 'tx' not in request.GET:
        return paypal_error('Transaction id not received from paypal')

    tx = request.GET['tx']
    # We have a transaction id. First we check if we already have it
    # in the database.
    # We only store transactions with status paid, so if it's in there,
    # then it's already paid, and what's happening here is a replay
    # (either by mistake or intentional). So we don't redirect the user
    # at this point, we just give an error message.
    try:
        ti = TransactionInfo.objects.get(paypaltransid=tx)
        return HttpResponseForbidden(
            'This transaction has already been processed')
    except TransactionInfo.DoesNotExist:
        pass

    # We haven't stored the status of this transaction. It either means
    # this is the first load, or that we have only seen pending state on
    # it before. Thus, we need to post back to paypal to figure out the
    # current status.
    try:
        params = {
            'cmd': '_notify-synch',
            'tx': tx,
            'at': pm.config('pdt_token'),
        }
        resp = requests.post(pm.get_baseurl(), data=params)
        if resp.status_code != 200:
            raise Exception("status code {0}".format(resp.status_code))
        r = resp.text
    except Exception as ex:
        # Failed to talk to paypal somehow. It should be ok to retry.
        return paypal_error('Failed to verify status with paypal: %s' % ex)

    # First line of paypal response contains SUCCESS if we got a valid
    # response (which might *not* mean it's actually a payment!)
    lines = r.split("\n")
    if lines[0] != 'SUCCESS':
        return paypal_error('Received an error from paypal.')

    # Drop the SUCCESS line
    lines = lines[1:]

    # The rest of the response is urlencoded key/value pairs
    d = dict([unquote_plus(line).split('=') for line in lines if line != ''])

    # Validate things that should never be wrong
    try:
        if d['txn_id'] != tx:
            return paypal_error('Received invalid transaction id from paypal')
        if d['txn_type'] != 'web_accept':
            return paypal_error(
                'Received transaction type %s which is unknown by this system!'
                % d['txn_type'])
        if d['business'] != pm.config('email'):
            return paypal_error(
                'Received payment for %s which is not the correct recipient!' %
                d['business'])
        if d['mc_currency'] != settings.CURRENCY_ABBREV:
            return paypal_error(
                'Received payment in %s, not %s. We cannot currently process this automatically.'
                % (d['mc_currency'], settings.CURRENCY_ABBREV))
    except KeyError as k:
        return paypal_error('Mandatory field %s is missing from paypal data!'
                            % k)

    # Now let's find the state of the payment
    if 'payment_status' not in d:
        return paypal_error('Payment status not received from paypal!')

    if d['payment_status'] == 'Completed':
        # Payment is completed. Create a paypal transaction info
        # object for it, and then try to match it to an invoice.

        # Double-check if it is already added. We did check this further
        # up, but it seems it can sometimes be called more than once
        # asynchronously, due to the check with paypal taking too
        # long.
        if TransactionInfo.objects.filter(paypaltransid=tx).exists():
            return HttpResponse("Transaction already processed",
                                content_type='text/plain')

        # Paypal seems to randomly change which field actually contains
        # the transaction title.
        if d.get('transaction_subject', ''):
            transtext = d['transaction_subject']
        else:
            transtext = d['item_name']
        ti = TransactionInfo(paypaltransid=tx,
                             timestamp=timezone.now(),
                             paymentmethod=method,
                             sender=d['payer_email'],
                             sendername=d['first_name'] + ' ' + d['last_name'],
                             amount=Decimal(d['mc_gross']),
                             fee=Decimal(d['mc_fee']),
                             transtext=transtext,
                             matched=False)
        ti.save()

        # Generate URLs that link back to paypal in a way that we can use
        # from the accounting system. Note that this is an undocumented
        # URL format for paypal, so it may stop working at some point in
        # the future.
        urls = [
            "%s?cmd=_view-a-trans&id=%s" % (
                pm.get_baseurl(),
                ti.paypaltransid,
            ),
        ]

        # Separate out donations made through our website
        if ti.transtext == pm.config('donation_text'):
            ti.matched = True
            ti.matchinfo = 'Donation, automatically matched'
            ti.save()

            # Generate a simple accounting record, that will have to be
            # manually completed.
            accstr = "Paypal donation %s" % ti.paypaltransid
            accrows = [
                (pm.config('accounting_income'), accstr, ti.amount - ti.fee,
                 None),
                (pm.config('accounting_fee'), accstr, ti.fee, None),
                (settings.ACCOUNTING_DONATIONS_ACCOUNT, accstr, -ti.amount,
                 None),
            ]
            create_accounting_entry(accrows, True, urls)

            return render(request, 'paypal/noinvoice.html', {})

        invoicemanager = InvoiceManager()
        (r, i, p) = invoicemanager.process_incoming_payment(
            ti.transtext,
            ti.amount,
            "Paypal id %s, from %s <%s>, auto" %
            (ti.paypaltransid, ti.sendername, ti.sender),
            ti.fee,
            pm.config('accounting_income'),
            pm.config('accounting_fee'),
            urls,
            payment_logger,
            method,
        )
        if r == invoicemanager.RESULT_OK:
            # Matched it!
            ti.matched = True
            ti.matchinfo = 'Matched standard invoice (auto)'
            ti.save()

            # Now figure out where to return the user. This comes from the
            # invoice processor, assuming we have one
            if p:
                url = p.get_return_url(i)
            else:
                # No processor, so redirect the user back to the basic
                # invoice page.
                if i.recipient_user:
                    # Registered to a specific user, so request that users
                    # login on redirect
                    url = "%s/invoices/%s/" % (settings.SITEBASE, i.pk)
                else:
                    # No user account registered, so send back to the secret
                    # url version
                    url = "%s/invoices/%s/%s/" % (settings.SITEBASE, i.pk,
                                                  i.recipient_secret)

            return render(request, 'paypal/complete.html', {
                'invoice': i,
                'url': url,
            })
        else:
            # Did not match an invoice anywhere!
            # We'll leave the transaction in the paypal transaction
            # list, where it will generate an alert in the nightly mail.
            return render(request, 'paypal/noinvoice.html', {})

    # For a pending payment, we set ourselves up with a redirect loop
    if d['payment_status'] == 'Pending':
        pending_reason = d.get('pending_reason', 'no reason given')
        return render(request, 'paypal/pending.html', {
            'reason': pending_reason,
        })
    return paypal_error('Unknown payment status %s.' % d['payment_status'])
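
The PDT body parsing above can be checked in isolation; a minimal sketch with made-up response values:

    from urllib.parse import unquote_plus

    raw = "SUCCESS\ntxn_id=4X1234567A\nmc_gross=150.00\npayer_email=buyer%40example.com"
    lines = raw.split("\n")[1:]  # drop the SUCCESS line
    d = dict(unquote_plus(line).split('=') for line in lines if line != '')
    assert d == {'txn_id': '4X1234567A', 'mc_gross': '150.00',
                 'payer_email': 'buyer@example.com'}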
Example #44
# Assumes module-level setup not shown in this snippet: imports of json and
# logging, a boto3 Glue client (glue = boto3.client('glue')), and the
# DestinationBucket / GlueServiceRole configuration values.
def lambda_handler(event, context):
    logger.info(json.dumps(event))
    for record in event['Records']:
        # Grab the file name from the event record which triggered the lambda
        # function and construct the paths for the data file and the name file.
        bucket = record['s3']['bucket']['name']
        key = unquote_plus(record['s3']['object']['key'])
        data_file_name = key.split('/')[0]
        s3_data_file_path = 's3://' + bucket + '/' + data_file_name + '/'
        name_file_bucket = DestinationBucket
        name_file_prefix = data_file_name + '/' + data_file_name + 'NameFile'
        name_file_path = 's3://' + name_file_bucket + '/' + name_file_prefix

        # Create a crawler for the data file if it does not already exist, then run it.
        try:
            crawler = glue.get_crawler(Name=data_file_name)
        except glue.exceptions.EntityNotFoundException:
            crawler = glue.create_crawler(
                Name=data_file_name,
                Role=GlueServiceRole,
                DatabaseName='sampledb',
                Description='Crawler for data files',
                Targets={
                    'S3Targets': [
                        {
                            'Path': s3_data_file_path,
                            'Exclusions': []
                        },
                    ]
                },
                SchemaChangePolicy={
                    'UpdateBehavior': 'UPDATE_IN_DATABASE',
                    'DeleteBehavior': 'DELETE_FROM_DATABASE'
                }
                # ,Configuration='{ "Version": 2.0, "CrawlerOutput": { "Partitions": { "AddOrUpdateBehavior": "InheritFromTable" } } }'
            )
            response = glue.start_crawler(Name=data_file_name)
        else:
            response = glue.start_crawler(Name=data_file_name)

        # Create a crawler for the name file if it does not already exist, then run it.
        try:
            crawler = glue.get_crawler(Name=data_file_name + '_name_file')
        except glue.exceptions.EntityNotFoundException:
            crawler = glue.create_crawler(
                Name=data_file_name + '_name_file',
                Role=GlueServiceRole,
                DatabaseName='sampledb',
                Description='Crawler for name files',
                Targets={
                    'S3Targets': [
                        {
                            'Path': name_file_path,
                            'Exclusions': []
                        },
                    ]
                },
                SchemaChangePolicy={
                    'UpdateBehavior': 'UPDATE_IN_DATABASE',
                    'DeleteBehavior': 'DELETE_FROM_DATABASE'
                }
                # ,Configuration='{ "Version": 2.0, "CrawlerOutput": { "Partitions": { "AddOrUpdateBehavior": "InheritFromTable" } } }'
            )
            response = glue.start_crawler(Name=data_file_name + '_name_file')
        else:
            response = glue.start_crawler(Name=data_file_name + '_name_file')

        # Run the file-agnostic Glue job that renames the files, passing the
        # file name as an argument. (Note: boto3's start_job_run also expects
        # a JobName argument; the job name is not shown in this snippet.)
        try:
            glue.start_job_run(Arguments={'--FileName': data_file_name,
                                          '--DestinationBucketName': DestinationBucket})
        except Exception:
            print('Glue Job runtime Issue')
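
A minimal S3 put event for exercising the handler locally could look like this (bucket and key values are illustrative):

    sample_event = {
        'Records': [{
            's3': {
                'bucket': {'name': 'my-source-bucket'},          # illustrative
                'object': {'key': 'sales/2020/01/part-00.csv'},  # illustrative
            }
        }]
    }
    # lambda_handler(sample_event, None) would then crawl
    # s3://my-source-bucket/sales/ with a crawler named "sales".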
Example #45
def ratings(request, username=None, key=None, host=None):
    if username is not None:
        coder = get_object_or_404(Coder, user__username=username)
        statistics = Statistics.objects.filter(account__coders=coder)
    else:
        key = unquote_plus(key)
        host = unquote_plus(host)
        account = get_object_or_404(Account, key=key, resource__host=host)
        statistics = Statistics.objects.filter(account=account)

    resource_host = request.GET.get('resource')
    if resource_host:
        statistics = statistics.filter(contest__resource__host=resource_host)

    qs = statistics \
        .annotate(date=F('contest__end_time')) \
        .annotate(name=F('contest__title')) \
        .annotate(host=F('contest__resource__host')) \
        .annotate(new_rating=Cast(KeyTextTransform('new_rating', 'addition'), IntegerField())) \
        .annotate(old_rating=Cast(KeyTextTransform('old_rating', 'addition'), IntegerField())) \
        .annotate(rating_change=Cast(KeyTextTransform('rating_change', 'addition'), IntegerField())) \
        .annotate(score=F('solving')) \
        .annotate(addition_solved=KeyTextTransform('solved', 'addition')) \
        .annotate(solved=Cast(KeyTextTransform('solving', 'addition_solved'), IntegerField())) \
        .annotate(problems=KeyTextTransform('problems', 'contest__info')) \
        .annotate(division=KeyTextTransform('division', 'addition')) \
        .annotate(cid=F('contest__pk')) \
        .annotate(ratings=F('contest__resource__ratings')) \
        .annotate(is_unrated=Cast(KeyTextTransform('is_unrated', 'contest__info'), IntegerField())) \
        .filter(Q(is_unrated__isnull=True) | Q(is_unrated=0)) \
        .filter(new_rating__isnull=False) \
        .filter(contest__resource__has_rating_history=True) \
        .filter(contest__stage__isnull=True) \
        .order_by('date') \
        .values(
            'cid',
            'name',
            'host',
            'date',
            'new_rating',
            'old_rating',
            'rating_change',
            'place',
            'score',
            'ratings',
            'solved',
            'problems',
            'division',
        )

    ratings = {
        'status': 'ok',
        'data': {},
    }

    dates = sorted(set(r['date'] for r in qs))
    ratings['data']['dates'] = dates
    ratings['data']['resources'] = {}

    for r in qs:
        colors = r.pop('ratings')

        division = r.pop('division')
        problems = json.loads(r.pop('problems') or '{}')
        if division and 'division' in problems:
            problems = problems['division'][division]
        r['n_problems'] = len(problems)

        date = r['date']
        if request.user.is_authenticated and request.user.coder:
            date = timezone.localtime(
                date, pytz.timezone(request.user.coder.timezone))
        r['when'] = date.strftime('%b %-d, %Y')
        resource = ratings['data']['resources'].setdefault(r['host'], {})
        resource['colors'] = colors
        if r['new_rating'] > resource.get('highest', {}).get('value', 0):
            resource['highest'] = {
                'value': r['new_rating'],
                'timestamp': int(date.timestamp()),
            }
        r['slug'] = slugify(r['name'])
        resource.setdefault('data', [])
        if r['rating_change'] is not None and r['old_rating'] is None:
            r['old_rating'] = r['new_rating'] - r['rating_change']

        if resource['data'] and r['old_rating']:
            last = resource['data'][-1]
            if last['new_rating'] != r['old_rating']:
                logger.warning(f"prev = {last}, curr = {r}")
        resource['data'].append(r)

    return JsonResponse(ratings)
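
The JSON payload returned by this view is shaped roughly as follows (values illustrative):

    # {
    #     "status": "ok",
    #     "data": {
    #         "dates": [...],                  # sorted contest end times
    #         "resources": {
    #             "codeforces.com": {          # illustrative host
    #                 "colors": [...],         # the resource's rating bands
    #                 "highest": {"value": 1805, "timestamp": 1577836800},
    #                 "data": [...]            # one record per rated contest
    #             }
    #         }
    #     }
    # }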
Example #46
def register_view(request):
    result = request.query.get('result', '')
    result = unquote_plus(result)

    return html_response('register.html', result=result)
Example #47
import subprocess
import sys

from urllib.parse import unquote_plus

from gtts import gTTS

# Directory containing this script: argv[0] minus the script file name
# (the original assumes the name is exactly 6 characters, e.g. "tts.py").
path = sys.argv[0]
path = path[:-6]

file = open(str(path) + "/voiceq.txt")

params = list()

for line in file:
    line = line.rstrip('\n')
    x = line.split("=", 1)
    params.append(x[1])

file.close()

msg = params[0]
name = params[1]
lng = params[2]

msg = unquote_plus(msg, 'cp1251')

tts = gTTS(msg, lang=lng)
tts.save(str(path) + "/conv/" + str(name) + ".mp3")

subprocess.run("ffmpeg -i " + str(path) + "/conv/" + str(name) +
               ".mp3 -vn -ar 44100 -ac 2 -b:a 64k " + str(path) + "/lines/" +
               str(name) + ".ogg -y",
               shell=True)
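
For reference, a voiceq.txt consistent with the parsing above might contain (hypothetical contents; the msg value is percent-encoded cp1251):

    msg=%CF%F0%E8%E2%E5%F2+%EC%E8%F0
    name=line01
    lng=ru

(the msg line decodes to "Привет мир")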
Example #48
def buscartrailer(item, trailers=[]):
    logger.info()

    # List of actions if run from context menu
    if item.action == "manual_search" and item.contextual:
        itemlist = manual_search(item)
        item.contentTitle = itemlist[0].contentTitle
    elif 'search' in item.action and item.contextual:
        itemlist = globals()[item.action](item)
    else:
        # Remove Trailer Search option from context menu to avoid redundancies
        if isinstance(item.context, str) and "buscar_trailer" in item.context:
            item.context = item.context.replace("buscar_trailer", "")
        elif isinstance(item.context,
                        list) and "buscar_trailer" in item.context:
            item.context.remove("buscar_trailer")

        item.text_color = ""

        itemlist = []
        if item.search_title:
            item.contentTitle = urllib.unquote_plus(item.search_title)
        elif item.contentTitle != "":
            item.contentTitle = item.contentTitle.strip()
        elif keyboard:
            contentTitle = re.sub(r'\[\/*(B|I|COLOR)\s*[^\]]*\]', '',
                                  item.contentTitle.strip())
            item.contentTitle = platformtools.dialog_input(
                default=contentTitle,
                heading=config.get_localized_string(70505))
            if item.contentTitle is None:
                item.contentTitle = contentTitle
            else:
                item.contentTitle = item.contentTitle.strip()
        else:
            contentTitle = re.sub(r'\[\/*(B|I|COLOR)\s*[^\]]*\]', '',
                                  item.contentTitle.strip())
            item.contentTitle = contentTitle

        item.year = item.infoLabels['year']

        logger.info("Search: %s" % item.contentTitle)
        logger.info("Year: %s" % item.year)
        if item.infoLabels['trailer'] and not trailers:
            url = item.infoLabels['trailer']
            if "youtube" in url:
                url = url.replace("embed/", "watch?v=")
            titulo, url, server = servertools.findvideos(url)[0]
            title = "Trailer  [" + server + "]"
            itemlist.append(
                item.clone(title=title, url=url, server=server, action="play"))
        if item.show or item.infoLabels[
                'tvshowtitle'] or item.contentType != "movie":
            tipo = "tv"
        else:
            tipo = "movie"
        try:
            if not trailers:
                itemlist.extend(tmdb_trailers(item, tipo))
            else:
                for trailer in trailers:
                    title = trailer['name'] + " [" + trailer[
                        'size'] + "p] (" + trailer['language'].replace(
                            "en", "ING").replace("it",
                                                 "ITA") + ")  [tmdb/youtube]"
                    itemlist.append(
                        item.clone(action="play",
                                   title=title,
                                   url=trailer['url'],
                                   server="youtube"))
        except Exception:
            import traceback
            logger.error(traceback.format_exc())

        # The same title format is used whether or not the call is contextual
        title = "%s"
        itemlist.append(
            item.clone(title=title % config.get_localized_string(70507),
                       action="youtube_search"))
        itemlist.append(
            item.clone(title=title % config.get_localized_string(70024),
                       action="filmaffinity_search"))
        # If it is a series, the option to search in Abandomoviez is not included
        if not item.show and not item.infoLabels['tvshowtitle']:
            itemlist.append(
                item.clone(title=title % config.get_localized_string(70508),
                           action="abandomoviez_search"))

    if item.contextual:
        global window_select, result
        select = Select("DialogSelect.xml",
                        config.get_runtime_path(),
                        item=item,
                        itemlist=itemlist,
                        caption=config.get_localized_string(70506) +
                        item.contentTitle)
        window_select.append(select)
        select.doModal()

        if item.windowed:
            return result, window_select
    else:
        return itemlist
Example #49
from urllib.parse import unquote_plus as _unquote_plus


def unquote_plus(*args, **kwargs):
    # Delegate to the stdlib function under an alias; calling unquote_plus()
    # here directly would recurse forever, since this wrapper shadows the
    # imported name.
    return _unquote_plus(*args, **kwargs)
Example #50
def get_video_path(video_name):
    return render_template("player.html",
                           m3u8_url="/" + video_name + "/index.m3u8",
                           video_name=unquote_plus(video_name))
Example #51
def urlunquote_plus(quoted_url):
    """
    A wrapper for Python's urllib.unquote_plus() function that can operate on
    the result of django.utils.http.urlquote_plus().
    """
    return force_text(urllib_parse.unquote_plus(smart_str(quoted_url)))
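
The wrapper simply round-trips what django.utils.http.urlquote_plus produced; a plain-Python illustration:

    from urllib.parse import quote_plus, unquote_plus

    s = 'name=J. Doe & co/§'
    assert unquote_plus(quote_plus(s)) == s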
Example #52
    def param_decode(self, value):
        # Assign the decoded result; the original dropped it and returned
        # the value unchanged.
        value = unquote(value) if re.search("%20", value) else unquote_plus(value)
        return value
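
The branch is there to preserve literal '+' characters: values that encode spaces as %20 are run through unquote(), which leaves '+' untouched, while everything else goes through unquote_plus(), which also turns '+' into a space.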
Example #53
    def getCVObjectData(self, response):
        '''
        Gathers object data from a response and tests each value to make sure
        it exists in the response before trying to set it.

        CVID and CVURL will always exist in a ComicVine response, so there
        is no need to verify this data.

        Returns a dictionary with all the gathered data.
        '''

        # Get Name
        name = ''
        if 'name' in response:
            if response['name']:
                name = response['name']

        # Get Start Year (only exists for Series objects)
        year = ''
        if 'start_year' in response:
            if response['start_year']:
                year = response['start_year']

        # Get Number (only exists for Issue objects)
        number = ''
        if 'issue_number' in response:
            if response['issue_number']:
                number = response['issue_number']

        # Get Description (Favor short description if available)
        desc = ''
        if 'deck' in response and response['deck']:
            # Check to see if the deck is a space (' ').
            if response['deck'] != ' ':
                desc = response['deck']
        # Fall back to the long description whenever no usable deck was found
        # (the original only did this when a 'deck' key was present).
        if desc == '':
            if 'description' in response and response['description']:
                desc = response['description']

        # Get Image
        image = ''
        if 'image' in response:
            if response['image']:
                image_url = self.imageurl + \
                    response['image']['super_url'].rsplit('/', 1)[-1]
                image_filename = unquote_plus(image_url.split('/')[-1])
                if image_filename != '1-male-good-large.jpg' and not re.match(".*question_mark_large.*.jpg", image_filename):
                    try:
                        image = utils.test_image(urlretrieve(
                            image_url, 'media/images/' + image_filename)[0])
                    except OSError as e:
                        self.logger.error(
                            f'getCVObjectData retrieve image - {e}')
                        image = None

        # Create data object
        data = {
            'cvid': response['id'],
            'cvurl': response['site_detail_url'],
            'name': name,
            'year': year,
            'number': number,
            'desc': utils.cleanup_html(desc, True),
            'image': image,
        }

        return data
Example #54
    def scrape_story_metadata(self, story_id):
        """
        Returns a dictionary with the metadata for the story.

        Attributes:
            -id: the id of the story
            -canon_type: the type of canon
            -canon: the name of the canon
            -author_id: the user id of the author
            -title: the title of the story
            -updated: the timestamp of the last time the story was updated
            -published: the timestamp of when the story was originally published
            -lang: the language the story is written in
            -genres: a list of the genres that the author categorized the story as
            -num_reviews
            -num_favs
            -num_follows
            -num_words: total number of words in all chapters of the story
            -rated: the story's rating.
        """
        url = '{0}/s/{1}'.format(self.base_url, story_id)
        result = requests.get(url)
        html = result.content
        soup = BeautifulSoup(html, self.parser)
        pre_story_links = soup.find(id='pre_story_links').find_all('a')
        author_id = int(
            re.search(r"var userid = (.*);", str(soup)).groups()[0])
        title = re.search(r"var title = (.*);", str(soup)).groups()[0]
        title = unquote_plus(title)[1:-1]
        metadata_div = soup.find(id='profile_top')
        times = metadata_div.find_all(attrs={'data-xutime': True})
        metadata_text = metadata_div.find(class_='xgray xcontrast_txt').text
        metadata_parts = metadata_text.split('-')
        genres = self.get_genres(metadata_parts[2].strip())
        metadata = {
            'id': story_id,
            'canon_type': pre_story_links[0].text,
            'canon': pre_story_links[1].text,
            'author_id': author_id,
            'title': title,
            'updated': int(times[0]['data-xutime']),
            'published': int(times[1]['data-xutime']),
            'lang': metadata_parts[1].strip(),
            'genres': genres
        }
        for parts in metadata_parts:
            parts = parts.strip()
            tag_and_val = parts.split(':')
            if len(tag_and_val) != 2:
                continue
            tag, val = tag_and_val
            tag = tag.strip().lower()
            if tag not in metadata:
                val = val.strip()
                try:
                    val = int(val.replace(',', ''))
                    metadata['num_' + tag] = val
                except ValueError:  # non-numeric metadata value
                    metadata[tag] = val
        if 'status' not in metadata:
            metadata['status'] = 'Incomplete'
        return metadata
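
A hypothetical call site (the enclosing scraper class and its constructor are not part of the snippet):

    # scraper = Scraper(base_url='https://www.fanfiction.net', parser='html.parser')
    # meta = scraper.scrape_story_metadata(12345)
    # meta['title'], meta['author_id'], meta.get('num_words'), meta['status']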
Example #55
File: http.py Project: Hollow-user/qr
def urlunquote_plus(quoted_url):
    """
    A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
    function. (was used for unicode handling on Python 2)
    """
    return unquote_plus(quoted_url)
Example #56
    def download_file(self):
        logger.info("Direct download")

        headers = []

        # Make sure the file can be created
        logger.info("nombrefichero=" + self.file_name)
        self.file_name = filetools.makeLegalFilename(self.file_name)
        logger.info("nombrefichero=" + self.file_name)
        logger.info("url=" + self.url)

        # Create the file
        existSize = 0
        f = open(self.file_name, 'wb')
        grabado = 0

        # Parse headers embedded in the URL, XBMC-style
        if "|" in self.url:
            additional_headers = self.url.split("|")[1]
            if "&" in additional_headers:
                additional_headers = additional_headers.split("&")
            else:
                additional_headers = [additional_headers]

            for additional_header in additional_headers:
                logger.info("additional_header: " + additional_header)
                name = re.findall("(.*?)=.*?", additional_header)[0]
                value = urllib.unquote_plus(
                    re.findall(".*?=(.*?)$", additional_header)[0])
                headers.append([name, value])

            self.url = self.url.split("|")[0]
            logger.info("url=" + self.url)

        # Set the socket timeout to 60 seconds
        socket.setdefaulttimeout(60)

        # Build the request and add the headers
        h = urllib2.HTTPHandler(debuglevel=0)
        request = urllib2.Request(self.url)
        for header in headers:
            logger.info("Header=" + header[0] + ": " + header[1])
            request.add_header(header[0], header[1])

        # Send the request
        opener = urllib2.build_opener(h)
        urllib2.install_opener(opener)
        try:
            connexion = opener.open(request)
        except urllib_error.HTTPError as e:
            logger.error("error %d (%s) al abrir la url %s" %
                         (e.code, e.msg, self.url))
            # print e.code
            # print e.msg
            # print e.hdrs
            # print e.fp
            f.close()

            # Error 416 means the requested range exceeds the file size => the download is already complete
            if e.code == 416:
                return 0
            else:
                return -2

        try:
            totalfichero = int(connexion.headers["Content-Length"])
        except:
            totalfichero = 1

        self.total_size = int(float(totalfichero) / float(1024 * 1024))

        logger.info("Content-Length=%s" % totalfichero)
        blocksize = 100 * 1024

        bloqueleido = connexion.read(blocksize)
        logger.info("Iniciando descarga del fichero, bloqueleido=%s" %
                    len(bloqueleido))

        maxreintentos = 10

        while len(bloqueleido) > 0:
            try:
                if os.path.exists(self.force_stop_file_name):
                    logger.info(
                        "Detectado fichero force_stop, se interrumpe la descarga"
                    )
                    f.close()

                    xbmc.executebuiltin((
                        u'XBMC.Notification("Cancelado", "Descarga en segundo plano cancelada", 300)'
                    ))

                    return

                # Write the block that was read
                # try:
                #    import xbmcvfs
                #    f.write( bloqueleido )
                # except:
                f.write(bloqueleido)
                grabado = grabado + len(bloqueleido)
                logger.info("grabado=%d de %d" % (grabado, totalfichero))
                percent = int(float(grabado) * 100 / float(totalfichero))
                self.progress = percent
                totalmb = float(float(totalfichero) / (1024 * 1024))
                descargadosmb = float(float(grabado) / (1024 * 1024))
                self.actual_size = int(descargadosmb)

                # Read the next block, retrying so a single timeout does not abort the whole download
                reintentos = 0
                while reintentos <= maxreintentos:
                    try:

                        before = time.time()
                        bloqueleido = connexion.read(blocksize)
                        after = time.time()
                        if (after - before) > 0:
                            self.velocidad = old_div(len(bloqueleido),
                                                     ((after - before)))
                            falta = totalfichero - grabado
                            if self.velocidad > 0:
                                self.tiempofalta = old_div(
                                    falta, self.velocidad)
                            else:
                                self.tiempofalta = 0
                        break
                    except:
                        import sys
                        reintentos = reintentos + 1
                        logger.info(
                            "ERROR en la descarga del bloque, reintento %d" %
                            reintentos)
                        for line in sys.exc_info():
                            logger.error("%s" % line)

                # Too many retries: the download failed
                if reintentos > maxreintentos:
                    logger.error("ERROR en la descarga del fichero")
                    f.close()

                    return -2

            except:
                import traceback, sys
                from pprint import pprint
                exc_type, exc_value, exc_tb = sys.exc_info()
                lines = traceback.format_exception(exc_type, exc_value, exc_tb)
                for line in lines:
                    line_splits = line.split("\n")
                    for line_split in line_splits:
                        logger.error(line_split)

                f.close()
                return -2

        return
Example #57
def lambda_handler(event, context):
    logging.getLogger().debug('[lambda_handler] Start')

    result = {}
    try:
        #------------------------------------------------------------------
        # Set Log Level
        #------------------------------------------------------------------
        global log_level
        log_level = str(environ['LOG_LEVEL'].upper())
        if log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
            log_level = 'ERROR'
        logging.getLogger().setLevel(log_level)

        #----------------------------------------------------------
        # Process event
        #----------------------------------------------------------
        logging.getLogger().info(event)

        if "resourceType" in event:
            process_athena_scheduler_event(event)
            result['message'] = "[lambda_handler] Athena scheduler event processed."
            logging.getLogger().debug(result['message'])

        elif 'Records' in event:
            for r in event['Records']:
                bucket_name = r['s3']['bucket']['name']
                key_name = unquote_plus(r['s3']['object']['key'])

                if 'APP_ACCESS_LOG_BUCKET' in environ and bucket_name == environ['APP_ACCESS_LOG_BUCKET']:
                    if key_name.startswith('athena_results/'):
                        process_athena_result(bucket_name, key_name, environ['IP_SET_ID_SCANNERS_PROBES'])
                        result['message'] = "[lambda_handler] Athena app log query result processed."
                        logging.getLogger().debug(result['message'])

                    else:
                        conf_filename = environ['STACK_NAME'] + '-app_log_conf.json'
                        output_filename = environ['STACK_NAME'] + '-app_log_out.json'
                        log_type = environ['LOG_TYPE']
                        ip_set_id = environ['IP_SET_ID_SCANNERS_PROBES']
                        process_log_file(bucket_name, key_name, conf_filename, output_filename, log_type, ip_set_id)
                        result['message'] = "[lambda_handler] App access log file processed."
                        logging.getLogger().debug(result['message'])

                elif 'WAF_ACCESS_LOG_BUCKET' in environ and bucket_name == environ['WAF_ACCESS_LOG_BUCKET']:
                    if key_name.startswith('athena_results/'):
                        process_athena_result(bucket_name, key_name, environ['IP_SET_ID_HTTP_FLOOD'])
                        result['message'] = "[lambda_handler] Athena AWS WAF log query result processed."
                        logging.getLogger().debug(result['message'])

                    else:
                        conf_filename = environ['STACK_NAME'] + '-waf_log_conf.json'
                        output_filename = environ['STACK_NAME'] + '-waf_log_out.json'
                        log_type = 'waf'
                        ip_set_id = environ['IP_SET_ID_HTTP_FLOOD']
                        process_log_file(bucket_name, key_name, conf_filename, output_filename, log_type, ip_set_id)
                        result['message'] = "[lambda_handler] AWS WAF access log file processed."
                        logging.getLogger().debug(result['message'])

                else:
                    result['message'] = "[lambda_handler] undefined handler for bucket %s"%bucket_name
                    logging.getLogger().info(result['message'])

                send_anonymous_usage_data()

        else:
            result['message'] = "[lambda_handler] undefined handler for this type of event"
            logging.getLogger().info(result['message'])

    except Exception as error:
        logging.getLogger().error(str(error))

    logging.getLogger().debug('[lambda_handler] End')
    return result
Example #58
def urldecode(string):
    return compat_urllib_parse.unquote_plus(string)
Example #59
    def __contains__(self, item):
        if "%" in item:
            item = unquote_plus(item)
        return item in self._db
Example #60
File: routes.py Project: ProfessorX737/EMS
def delete_notification(path, id):
    id = int(id)
    current_user.deleteNotification(id)
    path = unquote_plus(path).strip("/")
    return redirect(path)