Example #1
def get_remote_addons_list():
    list1 = _get_remote_list(URL_PLUG, REGEX_PLUG)
    list2 = _get_remote_list(URL_SNIP, REGEX_SNIP)
    res = []
    if list1:
        res += [(l[0], 'Plugin: ' + unquote(l[1])) for l in list1]
    if list2:
        res += [(l[0], 'Snippets: ' + unquote(l[1])) for l in list2]
    return sorted(res)
Example #2
        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)

            # Return the hashed version to the file
            return template % unquote(relative_url)
Example #3
def whereis(msg, names):
  pasla = list()

  for name in names:
    if name in datas.users:
      if msg.cmd[0] == "ip":
        if len(datas.users[name]) == 1:
          msg.send_chn ("L'ip de %s est %s." %(name, datas.users[name][0].ip))
        else:
          out = ""
          for local in datas.users[name]:
            out += ", " + local.ip
          msg.send_chn ("%s est connecté à plusieurs endroits : %s." %(name, out[2:]))
      else:
        if len(datas.users[name]) == 1:
          msg.send_chn ("%s est %s (%s)." %(name, datas.users[name][0].poste, unquote(datas.users[name][0].location)))
        else:
          out = ""
          for local in datas.users[name]:
            out += ", " + local.poste + " (" + unquote(local.location) + ")"
          msg.send_chn ("%s est %s." %(name, out[2:]))
    else:
      pasla.append(name)

  return pasla
Example #4
def test_check_routes(app, io_loop, username, endpoints):
    proxy = app.proxy

    for endpoint in endpoints:
        r = api_request(app, endpoint, method='post')
        r.raise_for_status()

    test_user = orm.User.find(app.db, username)
    assert test_user is not None

    # check a valid route exists for user
    test_user = app.users[username]
    before = sorted(io_loop.run_sync(app.proxy.get_routes))
    assert unquote(test_user.proxy_path) in before

    # check if a route is removed when user deleted
    io_loop.run_sync(lambda: app.proxy.check_routes(app.users, app._service_map))
    io_loop.run_sync(lambda: proxy.delete_user(test_user))
    during = sorted(io_loop.run_sync(app.proxy.get_routes))
    assert unquote(test_user.proxy_path) not in during

    # check if a route exists for user
    io_loop.run_sync(lambda: app.proxy.check_routes(app.users, app._service_map))
    after = sorted(io_loop.run_sync(app.proxy.get_routes))
    assert unquote(test_user.proxy_path) in after

    # check that before and after state are the same
    assert before == after
Example #5
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    def unquote_quote(segment):
        segment = unquote(segment)
        # Tilde is part of RFC3986 Unreserved Characters
        # http://tools.ietf.org/html/rfc3986#section-2.3
        # See also http://bugs.python.org/issue16285
        segment = quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + str('~'))
        return force_text(segment)

    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        return unquote_quote(url)

    try:
        netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        return unquote_quote(url)

    if query:
        # Separately unquoting key/value, so as to not mix querystring separators
        # included in query values. See #22267.
        query_parts = [(unquote(q[0]), unquote(q[1]))
                       for q in parse_qsl(query, keep_blank_values=True)]
        # urlencode will take care of quoting
        query = urlencode(query_parts)

    path = unquote_quote(path)
    fragment = unquote_quote(fragment)

    return urlunsplit((scheme, netloc, path, query, fragment))
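
A quick illustration of the function above (hypothetical URL; the expected output is a sketch, not taken from a test suite):

# Spaces in the path are percent-quoted, while query pairs are unquoted and
# re-encoded with urlencode (spaces become '+'):
# smart_urlquote('http://example.com/path with spaces/?q=a b&r=1')
# -> 'http://example.com/path%20with%20spaces/?q=a+b&r=1'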
Example #6
def decode_query_string(query_string, encoding='utf-8'):

    # Build the object
    result = [None]
    for key_value_str in query_string.split('&'):

        # Ignore empty key/value strings
        if not key_value_str:
            continue

        # Split the key/value string
        key_value = key_value_str.split('=')
        if len(key_value) != 2:
            raise ValueError("Invalid key/value pair '" + key_value_str + "'")
        value = unquote(key_value[1], encoding=encoding)

        # Find/create the object on which to set the value
        parent = result
        key_parent = 0
        for key in (unquote(key, encoding=encoding) for key in key_value[0].split('.')):
            obj = parent[key_parent]

            # Array key?  First "key" of an array must start with "0".
            if isinstance(obj, list) or (obj is None and key == '0'):

                # Create this key's container, if necessary
                if obj is None:
                    obj = parent[key_parent] = []

                # Create the index for this key
                try:
                    key = int(key)
                except ValueError:
                    raise ValueError("Invalid key/value pair '" + key_value_str + "'")
                if key == len(obj):
                    obj.append(None)
                elif key < 0 or key > len(obj):
                    raise ValueError("Invalid key/value pair '" + key_value_str + "'")

            # Dictionary key
            else:

                # Create this key's container, if necessary
                if obj is None:
                    obj = parent[key_parent] = {} # pylint: disable=redefined-variable-type

                # Create the index for this key
                if obj.get(key) is None:
                    obj[key] = None

            # Update the parent object and key
            parent = obj
            key_parent = key

        # Set the value
        if parent[key_parent] is not None:
            raise ValueError("Duplicate key '" + key_value_str + "'")
        parent[key_parent] = value

    return result[0] if (result[0] is not None) else {}
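
A minimal usage sketch for decode_query_string (hypothetical input):

# Dotted keys build nested containers: integer keys counting up from '0'
# become lists, everything else becomes dict keys; values stay strings.
assert decode_query_string('a.0=1&a.1=2&b.c=3') == {'a': ['1', '2'], 'b': {'c': '3'}}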
Example #7
def main():
    parser = argparse.ArgumentParser(description="Translate YAML `base_redirect` to .htaccess")
    parser.add_argument(
        "yaml_file",
        type=argparse.FileType("r"),
        default=sys.stdin,
        nargs="?",
        help="read from the YAML file (or STDIN)",
    )
    parser.add_argument(
        "htaccess_file",
        type=argparse.FileType("w"),
        default=sys.stdout,
        nargs="?",
        help="write to the .htaccess file (or STDOUT)",
    )
    args = parser.parse_args()

    # Load the YAML document and validate the required keys.
    document = yaml.safe_load(args.yaml_file)

    if not "idspace" in document or type(document["idspace"]) is not str:
        raise ValueError('YAML document must contain "idspace" string')
    idspace = document["idspace"]

    if not "base_url" in document or type(document["base_url"]) is not str:
        raise ValueError('YAML document must contain "base_url" string')

    if "base_redirect" in document and type(document["base_redirect"]) is str:
        base_url = unquote(document["base_url"])
        base_redirect = unquote(document["base_redirect"])
        args.htaccess_file.write(header_template % idspace)
        directive = 'RedirectMatch temp "(?i)^%s$" "%s"' % (base_url, base_redirect)
        args.htaccess_file.write(directive + "\n\n")
Example #8
def get_state_vars():
    curl_file = 'curl.sh'
    assert file_exists(curl_file), '`{}` file not found'.format(curl_file)
    with open(curl_file) as fd:
        data = dict(arg.split('=', maxsplit=1) for arg in re.search("--data '([^']*)'", fd.read()).group(1).split('&'))
    assert data, 'could not parse curl arguments'
    return unquote(data['__VIEWSTATE'].strip()), unquote(data['__EVENTVALIDATION'].strip())
Example #9
def service_url(url, defaults={}, extras_name='extras'):
    """
    environs handler for URLS, turning a url string into a dictionary
    of its component parts.
    """
    try:
        parsed = urlparse(url)
    except Exception:
        raise EnvError(
            'Service URLS look like: servtype://user:pass@host:port/' +
            extras_name + '?param1=Foo&param2=Bar')

    conf = {
        'host': unquote(parsed.hostname) if parsed.hostname else None,
        'port': parsed.port,
        'user': unquote(parsed.username) if parsed.username else None,
        'password': unquote(parsed.password) if parsed.password else None,
        extras_name: unquote(parsed.path)[1:] if parsed.path else None,
    }

    # If we get multiple values for a given key, use the last.
    extra_params = {k: v[-1] for k, v in parse_qs(parsed.query).items()}
    conf.update(extra_params)

    missing_defaults = {k: v for k, v in defaults.items()
                        if k not in conf or conf[k] is None}
    conf.update(missing_defaults)

    return conf
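
A usage sketch with a hypothetical connection string (relies on urlparse, parse_qs, and unquote as used above):

# Percent-encoded credentials are unquoted, the leading '/' is stripped from
# the path, and query parameters are merged into the result:
# service_url('postgres://user:p%40ss@db.example.com:5432/mydb?sslmode=require')
# -> {'host': 'db.example.com', 'port': 5432, 'user': 'user',
#     'password': 'p@ss', 'extras': 'mydb', 'sslmode': 'require'}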
Example #10
        def translate_path(self, path):
            """Translate a /-separated PATH to the local filename syntax.

            Components that mean special things to the local file system
            (e.g. drive or directory names) are ignored.  (XXX They should
            probably be diagnosed.)

            """
            # abandon query parameters
            path = path.split('?',1)[0]
            path = path.split('#',1)[0]
            if path.startswith(ACME_WELL_KNOWN):
                path = path[len(ACME_WELL_KNOWN):]
            else:
                return ""
            # Don't forget explicit trailing slash when normalizing. Issue17324
            trailing_slash = path.rstrip().endswith('/')
            try:
                path = urlparse.unquote(path, errors='surrogatepass')
            except (UnicodeDecodeError, TypeError):
                path = urlparse.unquote(path)
            path = posixpath.normpath(path)
            words = path.split('/')
            words = filter(None, words)
            path = serve_from_path
            for word in words:
                if os.path.dirname(word) or word in (os.curdir, os.pardir):
                    # Ignore components that are not a simple file/directory name
                    continue
                path = os.path.join(path, word)
            if trailing_slash:
                path += '/'
            return path
Example #11
 def normalizeUrl(self, url, base=None):
     if url and not (isHttpUrl(url) or os.path.isabs(url)):
         if base is not None and not isHttpUrl(base) and '%' in url:
             url = unquote(url)
         if base:
             if isHttpUrl(base):
                 scheme, sep, path = base.partition("://")
                 normedPath = scheme + sep + posixpath.normpath(os.path.dirname(path) + "/" + url)
             else:
                 if '%' in base:
                     base = unquote(base)
                 normedPath = os.path.normpath(os.path.join(os.path.dirname(base),url))
         else:
             normedPath = url
         if normedPath.startswith("file://"): normedPath = normedPath[7:]
         elif normedPath.startswith("file:\\"): normedPath = normedPath[6:]
         
         # no base, not normalized, must be relative to current working directory
         if base is None and not os.path.isabs(url): 
             normedPath = os.path.abspath(normedPath)
     else:
         normedPath = url
     
     if normedPath:
         if isHttpUrl(normedPath):
             scheme, sep, pathpart = normedPath.partition("://")
             pathpart = pathpart.replace('\\','/')
             endingSep = '/' if pathpart[-1] == '/' else ''  # normpath drops ending directory separator
             return scheme + "://" + posixpath.normpath(pathpart) + endingSep
         normedPath = os.path.normpath(normedPath)
         if normedPath.startswith(self.cacheDir):
             normedPath = self.cacheFilepathToUrl(normedPath)
     return normedPath
Example #12
def decode(encoded_str):
    """Decode an encrypted HTTP basic authentication string. Returns a tuple of
    the form (username, password), and raises a DecodeError exception if
    nothing could be decoded.
    """
    split = encoded_str.strip().encode("latin1").split(None)

    # If split is only one element, try to decode the username and password
    # directly.
    if len(split) == 1:
        try:
            username, password = b64decode(split[0]).split(b':', 1)
        except Exception:
            raise DecodeError

    # If there are only two elements, check the first and ensure it says
    # 'basic' so that we know we're about to decode the right thing. If not,
    # bail out.
    elif len(split) == 2:
        if split[0].strip().lower() == b'basic':
            try:
                username, password = b64decode(split[1]).split(b':', 1)
            except Exception:
                raise DecodeError
        else:
            raise DecodeError

    # If there are more than 2 elements, something crazy must be happening.
    # Bail.
    else:
        raise DecodeError

    return (unquote(username.decode('latin1')),
            unquote(password.decode('latin1')))
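
A usage sketch with hypothetical credentials:

from base64 import b64encode

# The first ':' separates username from password, so passwords may contain ':'.
assert decode('Basic ' + b64encode(b'user:pa:ss').decode('latin1')) == ('user', 'pa:ss')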
Example #13
def parse_s3_url(url) -> Tuple[str, str, str, str]:
    """parses a s3 url and extract credentials and s3 object path.

    A S3 URL looks like s3://aws_key:aws_secret@bucketname/objectname where
    credentials are optional. Since credentials might include characters
    such as `/`, `@` or `#`, they have to be urlquoted in the url.

    Args:
        url (str): the s3 url

    Returns:
        tuple: (access_key, secret, bucketname, objectname). If credentials
        are not specified, `access_key` and `secret` are set to None.
    """
    urlchunks = urlparse(url)
    scheme = urlchunks.scheme
    assert scheme in S3_SCHEMES, f'{scheme} unsupported, use one of {S3_SCHEMES}'
    assert not urlchunks.params, f's3 url should not have params, got {urlchunks.params}'
    assert not urlchunks.query, f's3 url should not have query, got {urlchunks.query}'
    assert not urlchunks.fragment, f's3 url should not have fragment, got {urlchunks.fragment}'
    # if either username or password is specified, we have credentials
    if urlchunks.username or urlchunks.password:
        # and they should both not be empty
        assert urlchunks.username, 's3 access key should not be empty'
        assert urlchunks.password, 's3 secret should not be empty'
        access_key = unquote(urlchunks.username)
        secret = unquote(urlchunks.password)
    else:
        access_key = secret = None
    objectname = urlchunks.path.lstrip('/')  # remove leading /, it's not part of the objectname
    assert objectname, "s3 objectname can't be empty"
    return access_key, secret, urlchunks.hostname, objectname
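
A usage sketch (hypothetical bucket and urlquoted secret; assumes 's3' is in S3_SCHEMES):

# The '%2F' in the secret is unquoted, and the leading '/' is stripped from the object name:
# parse_s3_url('s3://AKIAXXXX:secret%2Fkey@mybucket/path/to/object')
# -> ('AKIAXXXX', 'secret/key', 'mybucket', 'path/to/object')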
Example #14
    def searchForReleaseImages(self, artistName, releaseTitle, query=None):

        result = []
        if query:
            # Guard against a missing query; unquote(None) would raise TypeError.
            query = parse.unquote(query)
            if self.regex.match(query):
                result.append(ImageSearchResult(0, 0, query))
                return result

        try:
            bingResult = self.bingSearchImages(query if query else artistName + " " + releaseTitle)
            if bingResult:
                for br in bingResult:
                    result.append(br)
        except:
            pass

        try:
            it = self.itunesSearchArtistReleaseImages(artistName, releaseTitle)
            if it:
                for i in it:
                    result.append(i)
        except:
            pass

        return result
Example #15
def generate_aws_v4_signature(request):
    message = unquote(request.POST['to_sign'])
    dest = get_s3direct_destinations().get(unquote(request.POST['dest']))
    signing_date = datetime.strptime(request.POST['datetime'],
                                     '%Y%m%dT%H%M%SZ')

    auth = dest.get('auth')
    if auth and not auth(request.user):
        resp = json.dumps({'error': 'Permission denied.'})
        return HttpResponseForbidden(resp, content_type='application/json')

    region = getattr(settings, 'AWS_S3_REGION_NAME', None)
    if not region:
        resp = json.dumps({'error': 'S3 region config missing.'})
        return HttpResponseServerError(resp, content_type='application/json')

    aws_credentials = get_aws_credentials()
    if not aws_credentials.secret_key or not aws_credentials.access_key:
        resp = json.dumps({'error': 'AWS credentials config missing.'})
        return HttpResponseServerError(resp, content_type='application/json')

    signing_key = get_aws_v4_signing_key(aws_credentials.secret_key,
                                         signing_date, region, 's3')

    signature = get_aws_v4_signature(signing_key, message)
    resp = json.dumps({'s3ObjKey': signature})
    return HttpResponse(resp, content_type='application/json')
Example #16
    def parse_qs(self, next_url):
        if not next_url: return None
        if sys.version_info >= (3, 0):
            from urllib.parse import urlparse, unquote
            safe_unquote = lambda s: unquote(s)
        else:
            from urlparse import urlparse, unquote
            safe_unquote = lambda s: unquote(s.encode('utf8')).decode('utf8')

        result_qs = {}
        query = urlparse(next_url).query
        for kv in query.split('&'):
            # split, then unquote() into k, v strings
            k, v = map(safe_unquote, kv.split('=', 1))

            # merge PHP-style params like seed_illust_ids[] into an array
            matched = re.match(r'(?P<key>\w*)\[(?P<idx>\w*)\]', k)
            if matched:
                mk = matched.group('key')
                marray = result_qs.get(mk, [])
                # keep the origin sequence, just ignore group('idx')
                result_qs[mk] = marray + [v]
            else:
                result_qs[k] = v

        return result_qs
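
A usage sketch (hypothetical pixiv-style URL; self is the surrounding client object):

# PHP-style indexed keys are merged into one list; other keys stay scalar:
# self.parse_qs('https://example.com/?seed_illust_ids[0]=10&seed_illust_ids[1]=20&mode=r18')
# -> {'seed_illust_ids': ['10', '20'], 'mode': 'r18'}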
Example #17
 def hashed_name(self, name, content=None, filename=None):
     # `filename` is the name of file to hash if `content` isn't given.
     # `name` is the base name to construct the new hashed filename from.
     parsed_name = urlsplit(unquote(name))
     clean_name = parsed_name.path.strip()
     filename = (filename and urlsplit(unquote(filename)).path.strip()) or clean_name
     opened = content is None
     if opened:
         if not self.exists(filename):
             raise ValueError("The file '%s' could not be found with %r." % (filename, self))
         try:
             content = self.open(filename)
         except IOError:
             # Handle directory paths and fragments
             return name
     try:
         file_hash = self.file_hash(clean_name, content)
     finally:
         if opened:
             content.close()
     path, filename = os.path.split(clean_name)
     root, ext = os.path.splitext(filename)
     if file_hash is not None:
         file_hash = ".%s" % file_hash
     hashed_name = os.path.join(path, "%s%s%s" %
                                (root, file_hash, ext))
     unparsed_name = list(parsed_name)
     unparsed_name[2] = hashed_name
      # Special casing for a @font-face hack, like url(myfont.eot?#iefix)
     # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
     if '?#' in name and not unparsed_name[3]:
         unparsed_name[2] += '?'
     return urlunsplit(unparsed_name)
Example #18
def parse_gff3(filename):
    if filename.endswith(".gz"):
        open_func = gzip.open
    else:
        open_func = open  # the built-in function for opening files
    
    with open_func(filename) as document:
        # reading this way is a bit slower, but everything fits in memory
        # NOTE: iterating like this keeps the trailing newline character on
        # every line, so it is worth removing it with strip().
        for line in document:
            # skip comment lines
            if line.startswith("#"):
                continue

            parts = line.strip().split("\t")
            # if this assert fails, the line did not split into the expected number of fields on "\t"
            assert len(parts) == GFFRecord.FIELD_COUNT
            # Normalize data
            normalizedInfo = {
                "seqid": None if parts[0] == "." else unquote(parts[0]),
                "source": None if parts[1] == "." else unquote(parts[1]),
                "type": None if parts[2] == "." else unquote(parts[2]),
                "start": None if parts[3] == "." else int(parts[3]),
                "end": None if parts[4] == "." else int(parts[4]),
                "score": None if parts[5] == "." else float(parts[5]),
                "strand": None if parts[6] == "." else unquote(parts[6]),
                "phase": None if parts[7] == "." else unquote(parts[7]),
                "attributes": parse_gff_attributes(parts[8])
            }
            # Alternatively, you can emit the dictionary here, if you need mutability:
            #     yield normalizedInfo
            yield GFFRecord(**normalizedInfo)
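
A minimal usage sketch (hypothetical file; GFFRecord and parse_gff_attributes are assumed to be defined alongside this function):

# for record in parse_gff3("annotations.gff3.gz"):
#     print(record.seqid, record.type, record.start, record.end)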
Example #19
 def normalizeUrl(self, url, base=None):
     if url and not (url.startswith('http://') or os.path.isabs(url)):
         if base is not None and not base.startswith('http:') and '%' in url:
             url = unquote(url)
         if base:
             if base.startswith("http://"):
                 prot, sep, path = base.partition("://")
                 normedPath = prot + sep + posixpath.normpath(os.path.dirname(path) + "/" + url)
             else:
                 if '%' in base:
                     base = unquote(base)
                 normedPath = os.path.normpath(os.path.join(os.path.dirname(base),url))
         else:
             normedPath = url
         if normedPath.startswith("file://"): normedPath = normedPath[7:]
         elif normedPath.startswith("file:\\"): normedPath = normedPath[6:]
     else:
         normedPath = url
     
     if normedPath:
         if normedPath.startswith('http:'):
             return normedPath.replace('\\','/')
         elif os.path.sep == '\\' and '/' in normedPath:
             normedPath = normedPath.replace('/', '\\') # convert MSFT paths into '\' when normalizing
         if normedPath.startswith(self.cacheDir):
             normedPath = self.cacheFilepathToUrl(normedPath)
     return normedPath
Example #20
File: auth.py Project: unikmhz/npui
def find_princs_digest(param, request):
    sess = DBSession()

    try:
        user = sess.query(User).filter(User.state == UserState.active,
                                       User.enabled.is_(True),
                                       User.login == param['username']).one()
    except NoResultFound:
        return None
    if not user.password_ha1:
        return None
    req_path = unquote(request.path.lower())
    uri_path = unquote(param['uri'].lower())
    if req_path != uri_path:
        return None
    ha2 = hashlib.md5(('%s:%s' % (request.method,
                                  param['uri'])).encode()).hexdigest()
    data = '%s:%s:%s:%s:%s' % (param['nonce'], param['nc'],
                               param['cnonce'], 'auth', ha2)
    resp = hashlib.md5(('%s:%s' % (user.password_ha1,
                                   data)).encode()).hexdigest()
    if hmac.compare_digest(resp, param['response']):
        groups = ['g:%s' % (user.group.name,)]
        for sgr in user.secondary_groups:
            if sgr == user.group:
                continue
            groups.append('g:%s' % (sgr.name,))
        return groups
    return None
Example #21
 def __init__(self, url, exchange='logging.gelf', debugging_fields=True,
              extra_fields=True, fqdn=False, exchange_type='fanout', localname=None,
              facility=None, virtual_host='/', routing_key=''):
     self.url = url
     parsed = urlparse(url)
     if parsed.scheme != 'amqp':
         raise ValueError('invalid URL scheme (expected "amqp"): %s' % url)
     host = parsed.hostname or 'localhost'
     port = _ifnone(parsed.port, 5672)
      virtual_host = unquote(parsed.path[1:]) or virtual_host
     self.cn_args = {
         'host': '%s:%s' % (host, port),
         'userid': _ifnone(parsed.username, 'guest'),
         'password': _ifnone(parsed.password, 'guest'),
         'virtual_host': virtual_host,
         'insist': False,
     }
     self.exchange = exchange
     self.debugging_fields = debugging_fields
     self.extra_fields = extra_fields
     self.fqdn = fqdn
     self.exchange_type = exchange_type
     self.localname = localname
     self.facility = facility
     self.virtual_host = virtual_host
     self.routing_key = routing_key
     SocketHandler.__init__(self, host, port)
     self.addFilter(ExcludeFilter('amqplib'))
Example #22
File: wsgi.py Project: colons/spyne
def _parse_qs(qs):
    pairs = (s2 for s1 in qs.split('&') for s2 in s1.split(';'))
    retval = odict()

    for name_value in pairs:
        if name_value is None or len(name_value) == 0:
            continue
        nv = name_value.split('=', 1)

        if len(nv) != 2:
            # Handle case of a control-name with no equal sign
            nv.append(None)

        name = unquote(nv[0].replace('+', ' '))

        value = None
        if nv[1] is not None:
            value = unquote(nv[1].replace('+', ' '))

        l = retval.get(name, None)
        if l is None:
            l = retval[name] = []
        l.append(value)

    return retval
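
A usage sketch (hypothetical query string); retval maps each name to a list of values:

# Both '&' and ';' separate pairs, '+' decodes to a space, and a bare
# control-name with no '=' maps to [None]:
# _parse_qs('a=1&a=2;b=hello+world&c')
# -> {'a': ['1', '2'], 'b': ['hello world'], 'c': [None]}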
Example #23
    def translate_path(self, path):
        """
        Ignore the actual request path and just serve a specific folder.

        Mostly same as py3.6 source, replacing "path = os.getcwd()" with HERE so
        that the directory list or file transfers are relative to HERE rather
        than the current working directory.
        """
        # abandon query parameters
        path = path.split("?", 1)[0]
        path = path.split("#", 1)[0]
        # Don"t forget explicit trailing slash when normalizing. Issue17324
        trailing_slash = path.rstrip().endswith("/")
        try:
            path = unquote(path, errors="surrogatepass")
        except UnicodeDecodeError:
            path = unquote(path)
        except TypeError:  # py2 only accepts one param.
            path = unquote(path)
        path = posixpath.normpath(path)
        words = path.split("/")
        words = filter(None, words)
        path = HERE  # edited
        for word in words:
            if os.path.dirname(word) or word in (os.curdir, os.pardir):
                # Ignore components that are not a simple file/directory name
                continue
            path = os.path.join(path, word)
        if trailing_slash:
            path += "/"
        return path
Example #24
 def set(self, station, stream=None):
     station = unquote(station)
     if stream is not None:
         stream = unquote(stream)
     success = self.radio.set(station, stream)
     resp = 'Setting active stream to %s %s' % (station, stream)
     return json.dumps({'success': success, 'resp': resp}) + '\n'
Example #25
    def normalize_parameters(params):
        """ Normalize parameters """
        params = params or {}
        normalized_parameters = OrderedDict()

        def get_value_like_as_php(val):
            """ Prepare value for quote """
            try:
                base = basestring
            except NameError:
                base = (str, bytes)

            if isinstance(val, base):
                return val
            elif isinstance(val, bool):
                return "1" if val else ""
            elif isinstance(val, int):
                return str(val)
            elif isinstance(val, float):
                return str(int(val)) if val % 1 == 0 else str(val)
            else:
                return ""

        for key, value in params.items():
            value = get_value_like_as_php(value)
            key = quote(unquote(str(key))).replace("%", "%25")
            value = quote(unquote(str(value))).replace("%", "%25")
            normalized_parameters[key] = value

        return normalized_parameters
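
A usage sketch with hypothetical parameters:

# Whole floats collapse to ints, booleans map to "1"/"", and both keys and
# values are unquoted, re-quoted, and then have '%' doubled to '%25':
# normalize_parameters({'note': 'a b', 'ok': True, 'price': 10.0})
# -> OrderedDict([('note', 'a%2520b'), ('ok', '1'), ('price', '10')])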
Example #26
    def get_path(self, id_in_tree=None):

        id = ""
        if id_in_tree is None:

            if unquote(self.post["dir"]) == "0":
                return True, ""
            id = unquote(self.post["dir"])
        else:
            id = id_in_tree

        dict_lbdoc = {"lb_ctrl_cookie": self.cookie ,
                      "lb_ctrl_op": "db_search",
                      "lb_ctrl_db": self.db_name,
                      "lb_ctrl_qry": "str_id_in_tree='" + id + "'",
                      'lb_ctrl_ctx': 'search_tree_file'}
        submit_operations_df_return = BrsLbdocCoreCs().submit_operations_df(None, dict_lbdoc, False)
        if submit_operations_df_return.operation_group[0].lb_occurrences_list:
            result = submit_operations_df_return.operation_group[0].lb_occurrences_list.results[0]

            return True, result.str_titulo

        else:
            lbdoc_return_objs = submit_operations_df_return.lbdoc_return_objs.lbdoc_return_objs[0]
            json_return = json.dumps(lbdoc_return_objs, default=lambda o: o.__dict__)
            return False,json_return
Example #27
def handle_client(client_reader, client_writer):

    req_line = yield from asyncio.wait_for(client_reader.readline(),
                                           timeout=10.0)
#   print('Req line "{}"'.format(req_line))
    while True:
        header = yield from asyncio.wait_for(client_reader.readline(),
                                             timeout=10.0)
        if header == b'\r\n':
            break
#       print('Header "{}"'.format(header))
        key, val = map(str.strip, header.rstrip().decode().lower().split(':', 1))

    method, path, version = req_line.decode().split(' ')
#   print('method = {!r}; path = {!r}; version = {!r}'.format(method, path, version))

    if path.startswith('/send_req'):
        path, args = path.split('?')
        args = loads(unquote(args))
        request_handler = RequestHandler()
        yield from request_handler.run(client_writer, args)
#           self.reader, self.writer, self.transport, self._request_handler, args)
    elif path.startswith('/get_login'):
        path, args = path.split('?')
        args = loads(unquote(args))
        request_handler = RequestHandler()
        yield from request_handler.get_login(client_writer, args)
    elif path.startswith('/dev'):
        path = path[4:]
        send_js_dev(client_writer, path)
    else:
        path = path[1:]
        send_js_prod(client_writer, path)
Example #28
    def parse(self, ticket):
        """Parses the passed ticket, returning a tuple containing the digest,
        user_id, valid_until, tokens, and user_data fields
        """
        if len(ticket) < self._min_ticket_size():
            raise TicketParseError(ticket, 'Invalid ticket length')

        digest_len = self._hash.digest_size * 2
        digest = ticket[:digest_len]

        try:
            time_len = 8
            time = int(ticket[digest_len:digest_len + time_len], 16)
        except ValueError:
            raise TicketParseError(ticket, 'Invalid time field')

        parts = ticket[digest_len + time_len:].split('!')
        if len(parts) != 3:
            raise TicketParseError(ticket, 'Missing parts')

        user_id = ulp.unquote(parts[0])
        tokens = ()
        if parts[1]:
            tokens = tuple((ulp.unquote(t) for t in parts[1].split(',')))

        user_data = ulp.unquote(parts[2])

        return TicketInfo(digest, user_id, tokens, user_data, time)
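
For reference, the ticket layout this parser expects (derived from the code above):

# <hex digest: 2 * digest_size chars><8 hex chars: timestamp><user_id>!<token1,token2,...>!<user_data>
# where user_id, each token, and user_data are urlquoted.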
Example #29
def baidu_get_song_data(sid):
    data = json.loads(get_html('http://music.baidu.com/data/music/fmlink?songIds=%s' % sid, faker = True))['data']

    if data['xcode'] != '':
        # inside mainland China
        return data['songList'][0]
    else:
        # outside mainland China
        html = get_html("http://music.baidu.com/song/%s" % sid)

        # baidu pan link
        sourceLink = r1(r'"link-src-info"><a href="([^"]+)"', html)
        if sourceLink != None:
            sourceLink = sourceLink.replace('&amp;', '&')
        sourceHtml = get_html(sourceLink) if sourceLink != None else None

        songLink =  r1(r'\\"dlink\\":\\"([^"]*)\\"', sourceHtml).replace('\\\\/', '/') if sourceHtml != None else r1(r'download_url="([^"]+)"', html)
        songName = parse.unquote(r1(r'songname=([^&]+)&', html))
        artistName = parse.unquote(r1(r'songartistname=([^&]+)&', html))
        albumName = parse.unquote(r1(r'songalbumname=([^&]+)&', html))
        lrcLink = r1(r'data-lyricdata=\'{ "href":"([^"]+)"', html)

        return json.loads(json.dumps({'songLink'   : songLink,
                                      'songName'   : songName,
                                      'artistName' : artistName,
                                      'albumName'  : albumName,
                                      'lrcLink'    : lrcLink}, ensure_ascii=False))
Example #30
def defang(url):
    try:
        u = urlparse(unquote(url))
    except ValueError:
        return "Invalid URL"

    # hxxps:\/\/www[dot]cwi[dot]nl
    defanged_url = f"{u.scheme.replace('t', 'x')}:\\/\\/{u.hostname.replace('.', '[dot]')}"

    if u.port:
        defanged_url += ':' + str(u.port)

    if u.path:
        defanged_url += u.path.replace('/', '\\/')

    if u.params:
        defanged_url += u.params.replace('/', '\\/').replace('.', '[dot]')

    if u.query:
        defanged_url += '?' + u.query.replace('/', '\\/').replace('.', '[dot]')

    if u.fragment:
        defanged_url += '#' + u.fragment

    return defanged_url
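
A usage sketch (hypothetical URL), given the fixed parsing above:

# defang('https://www.cwi.nl/a/b?x=1#frag')
# -> r'hxxps:\/\/www[dot]cwi[dot]nl\/a\/b?x=1#frag'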
Example #31
        def now_handler(event, context):
            payload = json.loads(event['body'])

            headers = Headers(payload.get('headers', {}))

            body = payload.get('body', '')
            if body != '':
                if payload.get('encoding') == 'base64':
                    body = base64.b64decode(body)
            if isinstance(body, string_types):
                body = to_bytes(body, charset='utf-8')

            url = urlparse(unquote(payload['path']))
            query = url.query
            path = url.path

            environ = {
                'CONTENT_LENGTH': str(len(body)),
                'CONTENT_TYPE': headers.get('content-type', ''),
                'PATH_INFO': path,
                'QUERY_STRING': query,
                'REMOTE_ADDR': headers.get(
                    'x-forwarded-for',
                    headers.get('x-real-ip', payload.get('true-client-ip', ''))),
                'REQUEST_METHOD': payload['method'],
                'SERVER_NAME': headers.get('host', 'lambda'),
                'SERVER_PORT': headers.get('x-forwarded-port', '80'),
                'SERVER_PROTOCOL': 'HTTP/1.1',
                'event': event,
                'context': context,
                'wsgi.errors': sys.stderr,
                'wsgi.input': BytesIO(body),
                'wsgi.multiprocess': False,
                'wsgi.multithread': False,
                'wsgi.run_once': False,
                'wsgi.url_scheme': headers.get('x-forwarded-proto', 'http'),
                'wsgi.version': (1, 0),
            }

            for key, value in environ.items():
                if isinstance(value, string_types) and key != 'QUERY_STRING':
                    environ[key] = wsgi_encoding_dance(value)

            for key, value in headers.items():
                key = 'HTTP_' + key.upper().replace('-', '_')
                if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                    environ[key] = value

            response = Response.from_app(__NOW_HANDLER_FILENAME.app, environ)

            return_dict = {
                'statusCode': response.status_code,
                'headers': dict(response.headers)
            }

            if response.data:
                return_dict['body'] = base64.b64encode(
                    response.data).decode('utf-8')
                return_dict['encoding'] = 'base64'

            return return_dict
Example #32
 def _unquote_all(links: list[list[str]]) -> list[list[str]]:
     return [[unquote(link) for link in item_links] for item_links in links]
Example #33
    def grade_handler(self, request, suffix):  # lint-amnesty, pylint: disable=unused-argument
        """
        This is called by courseware.module_render, to handle an AJAX call.

        Used only for grading. Returns XML response.

        Example of request body from LTI provider::

        <?xml version = "1.0" encoding = "UTF-8"?>
            <imsx_POXEnvelopeRequest xmlns = "some_link (may be not required)">
              <imsx_POXHeader>
                <imsx_POXRequestHeaderInfo>
                  <imsx_version>V1.0</imsx_version>
                  <imsx_messageIdentifier>528243ba5241b</imsx_messageIdentifier>
                </imsx_POXRequestHeaderInfo>
              </imsx_POXHeader>
              <imsx_POXBody>
                <replaceResultRequest>
                  <resultRecord>
                    <sourcedGUID>
                      <sourcedId>feb-123-456-2929::28883</sourcedId>
                    </sourcedGUID>
                    <result>
                      <resultScore>
                        <language>en-us</language>
                        <textString>0.4</textString>
                      </resultScore>
                    </result>
                  </resultRecord>
                </replaceResultRequest>
              </imsx_POXBody>
            </imsx_POXEnvelopeRequest>

        Example of correct/incorrect answer XML body:: see response_xml_template.
        """
        response_xml_template = textwrap.dedent("""\
            <?xml version="1.0" encoding="UTF-8"?>
            <imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
                <imsx_POXHeader>
                    <imsx_POXResponseHeaderInfo>
                        <imsx_version>V1.0</imsx_version>
                        <imsx_messageIdentifier>{imsx_messageIdentifier}</imsx_messageIdentifier>
                        <imsx_statusInfo>
                            <imsx_codeMajor>{imsx_codeMajor}</imsx_codeMajor>
                            <imsx_severity>status</imsx_severity>
                            <imsx_description>{imsx_description}</imsx_description>
                            <imsx_messageRefIdentifier>
                            </imsx_messageRefIdentifier>
                        </imsx_statusInfo>
                    </imsx_POXResponseHeaderInfo>
                </imsx_POXHeader>
                <imsx_POXBody>{response}</imsx_POXBody>
            </imsx_POXEnvelopeResponse>
        """)
        # Returns when `action` is unsupported.
        # Supported actions:
        #   - replaceResultRequest.
        unsupported_values = {
            'imsx_codeMajor': 'unsupported',
            'imsx_description':
            'Target does not support the requested operation.',
            'imsx_messageIdentifier': 'unknown',
            'response': ''
        }
        # Returns if:
        #   - past due grades are not accepted and grade is past due
        #   - score is out of range
        #   - can't parse response from TP;
        #   - can't verify OAuth signing or OAuth signing is incorrect.
        failure_values = {
            'imsx_codeMajor': 'failure',
            'imsx_description': 'The request has failed.',
            'imsx_messageIdentifier': 'unknown',
            'response': ''
        }

        if not self.accept_grades_past_due and self.is_past_due():
            failure_values['imsx_description'] = "Grade is past due"
            return Response(response_xml_template.format(**failure_values),
                            content_type="application/xml")

        try:
            imsx_messageIdentifier, sourcedId, score, action = self.parse_grade_xml_body(
                request.body)
        except Exception as e:  # lint-amnesty, pylint: disable=broad-except
            error_message = "Request body XML parsing error: " + escape(str(e))
            log.debug("[LTI]: " + error_message)  # lint-amnesty, pylint: disable=logging-not-lazy
            failure_values['imsx_description'] = error_message
            return Response(response_xml_template.format(**failure_values),
                            content_type="application/xml")

        # Verify OAuth signing.
        try:
            self.verify_oauth_body_sign(request)
        except (ValueError, LTIError) as e:
            failure_values['imsx_messageIdentifier'] = escape(
                imsx_messageIdentifier)
            error_message = "OAuth verification error: " + escape(str(e))
            failure_values['imsx_description'] = error_message
            log.debug("[LTI]: " + error_message)  # lint-amnesty, pylint: disable=logging-not-lazy
            return Response(response_xml_template.format(**failure_values),
                            content_type="application/xml")

        real_user = self.system.get_real_user(
            parse.unquote(sourcedId.split(':')[-1]))
        if not real_user:  # that means we can't save to database, as we do not have real user id.
            failure_values['imsx_messageIdentifier'] = escape(
                imsx_messageIdentifier)
            failure_values['imsx_description'] = "User not found."
            return Response(response_xml_template.format(**failure_values),
                            content_type="application/xml")

        if action == 'replaceResultRequest':
            self.set_user_module_score(real_user, score, self.max_score())

            values = {
                'imsx_codeMajor': 'success',
                'imsx_description': f'Score for {sourcedId} is now {score}',
                'imsx_messageIdentifier': escape(imsx_messageIdentifier),
                'response': '<replaceResultResponse/>'
            }
            log.debug("[LTI]: Grade is saved.")
            return Response(response_xml_template.format(**values),
                            content_type="application/xml")

        unsupported_values['imsx_messageIdentifier'] = escape(
            imsx_messageIdentifier)
        log.debug("[LTI]: Incorrect action.")
        return Response(response_xml_template.format(**unsupported_values),
                        content_type='application/xml')
Example #34
    def oauth_params(self, custom_parameters, client_key, client_secret):
        """
        Signs request and returns signature and OAuth parameters.

        `custom_parameters` is a dict of the parsed `custom_parameter` field.
        `client_key` and `client_secret` are LTI tool credentials.

        Also *anonymous student id* is passed to template and therefore to LTI provider.
        """

        client = oauthlib.oauth1.Client(client_key=str(client_key),
                                        client_secret=str(client_secret))

        # Must have parameters for correct signing from LTI:
        body = {
            'user_id': self.get_user_id(),
            'oauth_callback': 'about:blank',
            'launch_presentation_return_url': '',
            'lti_message_type': 'basic-lti-launch-request',
            'lti_version': 'LTI-1p0',
            'roles': self.role,

            # Parameters required for grading:
            'resource_link_id': self.get_resource_link_id(),
            'lis_result_sourcedid': self.get_lis_result_sourcedid(),
            'context_id': self.context_id,
        }

        if self.has_score:
            body.update(
                {'lis_outcome_service_url': self.get_outcome_service_url()})

        self.user_email = ""  # lint-amnesty, pylint: disable=attribute-defined-outside-init
        self.user_username = ""  # lint-amnesty, pylint: disable=attribute-defined-outside-init

        # Username and email can't be sent in Studio mode, because the user object is not defined.
        # To test this functionality, use the LMS.

        if callable(self.runtime.get_real_user):
            real_user_object = self.runtime.get_real_user(
                self.runtime.anonymous_student_id)
            try:
                self.user_email = real_user_object.email  # lint-amnesty, pylint: disable=attribute-defined-outside-init
            except AttributeError:
                self.user_email = ""  # lint-amnesty, pylint: disable=attribute-defined-outside-init
            try:
                self.user_username = real_user_object.username  # lint-amnesty, pylint: disable=attribute-defined-outside-init
            except AttributeError:
                self.user_username = ""  # lint-amnesty, pylint: disable=attribute-defined-outside-init

        if self.ask_to_send_username and self.user_username:
            body["lis_person_sourcedid"] = self.user_username
        if self.ask_to_send_email and self.user_email:
            body["lis_person_contact_email_primary"] = self.user_email

        # Appending custom parameter for signing.
        body.update(custom_parameters)

        headers = {
            # This is needed for body encoding:
            'Content-Type': 'application/x-www-form-urlencoded',
        }

        try:
            __, headers, __ = client.sign(str(self.launch_url.strip()),
                                          http_method='POST',
                                          body=body,
                                          headers=headers)
        except ValueError:  # Scheme not in url.
            # https://github.com/idan/oauthlib/blob/master/oauthlib/oauth1/rfc5849/signature.py#L136
            # Stubbing headers for now:
            log.info(
                "LTI module %s in course %s does not have oauth parameters correctly configured.",
                self.location,
                self.location.course_key,
            )
            headers = {
                'Content-Type':
                'application/x-www-form-urlencoded',
                'Authorization':
                'OAuth oauth_nonce="80966668944732164491378916897", \
oauth_timestamp="1378916897", oauth_version="1.0", oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="", oauth_signature="frVp4JuvT1mVXlxktiAUjQ7%2F1cw%3D"'
            }

        params = headers['Authorization']
        # Parse headers to pass to template as part of context:
        params = dict([
            param.strip().replace('"', '').split('=')
            for param in params.split(',')
        ])

        params['oauth_nonce'] = params['OAuth oauth_nonce']
        del params['OAuth oauth_nonce']

        # oauthlib encodes the signature with
        # 'Content-Type': 'application/x-www-form-urlencoded',
        # so '=' becomes '%3D'.
        # We send the form via the browser, which will encode it again,
        # so we need to decode the signature back:
        params['oauth_signature'] = parse.unquote(params['oauth_signature']).encode('utf-8').decode('utf8')  # lint-amnesty, pylint: disable=line-too-long

        # Add LTI parameters to OAuth parameters for sending in form.
        params.update(body)
        return params
Example #35
def extract_images(data):
    image_urls = []
    for x in range(1, 9):
        image_url = parse.unquote(data['sProdImgNo_%d' % x]).replace("200", "0")
        image_urls.append(image_url)
    return image_urls
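
A usage sketch (hypothetical payload): data is expected to hold eight percent-encoded thumbnail URLs under sProdImgNo_1 .. sProdImgNo_8, and replacing "200" with "0" requests the full-size variant:

# extract_images({'sProdImgNo_%d' % i: 'https%3A//img.example.com/200/pic.jpg'
#                 for i in range(1, 9)})
# -> a list of eight 'https://img.example.com/0/pic.jpg' strings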
Example #36
def remove_trash(url):
    """
    Remove trailing junk (everything from the first '.') from a stored TV URL and unquote it.
    """
    return unquote(url[:url.find(".")])
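
A usage sketch (hypothetical stored URL):

# Everything from the first '.' onward is dropped before unquoting:
# remove_trash('Channel%20One.ts')  ->  'Channel One'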
Example #37
 def version(self):
     if "version" in self.kwargs:
         return unquote(self.kwargs["version"])
     return None
Example #38
def google(url, ref=None):
	try:
		if 'lh3.googleusercontent' in url or 'bp.blogspot' in url:
			newheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36',
							'Accept': '*/*', 'Host': 'lh3.googleusercontent.com', 'Accept-Language': 'en-US,en;q=0.8,de;q=0.6,es;q=0.4',
							'Accept-Encoding': 'identity;q=1, *;q=0', 'Referer': ref,
							'Connection': 'Keep-Alive', 'X-Client-Data': 'CJK2yQEIo7bJAQjEtskBCPqcygEIqZ3KAQjSncoBCKijygE=',
							'Range': 'bytes=0-'}
			resp = client.request(url, headers=newheaders, redirect=False, output='extended', timeout='10')
			loc = resp[2]['Location']
			c = resp[2]['Set-Cookie'].split(';')[0]
			url = '%s|Cookie=%s' % (loc, c)
			return url

		if any(x in url for x in ('youtube.', 'docid=')):
			url = 'https://drive.google.com/file/d/%s/view' % re.compile(r'docid=([\w-]+)').findall(url)[0]

		netloc = urlparse(url.strip().lower()).netloc
		netloc = netloc.split('.google')[0]

		if netloc == 'docs' or netloc == 'drive':
			url = url.split('/preview', 1)[0]
			url = url.replace('drive.google.com', 'docs.google.com')

		headers = {'User-Agent': client.agent()}
		result = client.request(url, output='extended', headers=headers)

		try: headers['Cookie'] = result[2]['Set-Cookie']
		except: pass

		result = result[0]
		if netloc == 'docs' or netloc == 'drive':
			result = re.compile(r'"fmt_stream_map",(".+?")').findall(result)[0]
			result = jsloads(result)
			result = [i.split('|')[-1] for i in result.split(',')]
			result = sum([googletag(i, append_height=True) for i in result], [])

		elif netloc == 'photos':
			result = result.replace('\r', '').replace('\n', '').replace('\t', '')
			result = re.compile(r'"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]
			result = result.replace('\\u003d', '=').replace('\\u0026', '&')
			result = re.compile(r'url=(.+?)&').findall(result)
			result = [unquote(i) for i in result]
			result = sum([googletag(i, append_height=True) for i in result], [])

		elif netloc == 'picasaweb':
			id = re.compile(r'#(\d*)').findall(url)[0]
			result = re.search(r'feedPreload:\s*(.*}]}})},', result, re.S).group(1)
			result = jsloads(result)['feed']['entry']

			if len(result) > 1: result = [i for i in result if str(id) in i['link'][0]['href']][0]
			elif len(result) == 1: result = result[0]

			result = result['media']['content']
			result = [i['url'] for i in result if 'video' in i['type']]
			result = sum([googletag(i, append_height=True) for i in result], [])

		elif netloc == 'plus':
			id = (urlparse(url).path).split('/')[-1]
			result = result.replace('\r', '').replace('\n', '').replace('\t', '')
			result = result.split('"%s"' % id)[-1].split(']]')[0]
			result = result.replace('\\u003d', '=').replace('\\u0026', '&')
			result = re.compile(r'url=(.+?)&').findall(result)
			result = [unquote(i) for i in result]
			result = sum([googletag(i, append_height=True) for i in result], [])
		result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)
		url = []

		for q in ('4K', '1440p', '1080p', 'HD', 'SD'):
			try: url += [[i for i in result if i.get('quality') == q][0]]
			except: pass

		for i in url:
			i.pop('height', None)
			i.update({'url': i['url'] + '|%s' % urlencode(headers)})
		if not url: return
		return url
	except: return
Example #39
 def prefs_open_url(self, url_params):
     url = unquote(url_params['query']['url'])
     logger.info('Open Web URL %s', url)
     OpenUrlAction(url).run()
Example #40
print(userInput)

#query="%20".join(map(str,userInput))

query="+".join(userInput)
print (query)
url=ddg+query+ddgfooter
print(url)

page=requests.get(url)

soup = BeautifulSoup(page.content, 'html.parser')
firstLink=soup.find_all(class_="result__a")
#print(firstLink)
#print(firstLink[0])

print(firstLink[0]['href']) #printing the href value
strurl = unquote(firstLink[0]['href'])  # getting the clean url
noeqto = strurl.split("uddg=")  # getting rid of a ddg custom tag
print(noeqto[1])  # the clean url lies after '=', in the 2nd element of the list

#print(firstLink.prettify())
#print(page.content)
Example #41
def generate_trending(db, time, genre, limit, offset):
    identity_url = shared_config['discprov']['identity_service_url']
    identity_trending_endpoint = urljoin(identity_url,
                                         f"/tracks/trending/{time}")

    post_body = {}
    post_body["limit"] = limit
    post_body["offset"] = offset

    # Retrieve genre and query all tracks if required
    if genre is not None:
        # Parse encoded characters, such as Hip-Hop%252FRap -> Hip-Hop/Rap
        genre = unquote(genre)
        with db.scoped_session() as session:
            genre_list = get_genre_list(genre)
            genre_track_ids = (session.query(Track.track_id).filter(
                Track.genre.in_(genre_list), Track.is_current == True,
                Track.is_delete == False, Track.is_unlisted == False,
                Track.stem_of == None).all())
            genre_specific_track_ids = [
                record[0] for record in genre_track_ids
            ]
            post_body["track_ids"] = genre_specific_track_ids

    # Query trending information from identity service
    resp = None
    try:
        resp = requests.post(identity_trending_endpoint, json=post_body)
    except Exception as e:  # pylint: disable=W0703
        logger.error(
            f'Error retrieving trending info - {identity_trending_endpoint}, {post_body}'
        )
        return api_helpers.error_response(e, 500)

    json_resp = resp.json()
    if "error" in json_resp:
        return api_helpers.error_response(json_resp["error"], 500)

    listen_counts = json_resp["listenCounts"]
    # Convert trackId to snakeCase
    for track_entry in listen_counts:
        track_entry[response_name_constants.track_id] = track_entry['trackId']
        del track_entry['trackId']

    track_ids = [
        track[response_name_constants.track_id] for track in listen_counts
    ]

    with db.scoped_session() as session:
        # Filter tracks to not-deleted ones so trending order is preserved
        not_deleted_track_ids = (session.query(
            Track.track_id,
            Track.created_at).filter(Track.track_id.in_(track_ids),
                                     Track.is_current == True,
                                     Track.is_delete == False,
                                     Track.is_unlisted == False,
                                     Track.stem_of == None).all())

        # Generate track -> created_at date
        track_created_at_dict = {
            record[0]: record[1]
            for record in not_deleted_track_ids
        }

        not_deleted_track_ids = set(
            [record[0] for record in not_deleted_track_ids])  # pylint: disable=R1718
        # Query repost counts
        repost_counts = get_repost_counts(session, False, True,
                                          not_deleted_track_ids, None)
        # Generate track_id --> repost_count mapping
        track_repost_counts = {
            repost_item_id: repost_count
            for (repost_item_id, repost_count, repost_type) in repost_counts
            if repost_type == RepostType.track
        }

        # Query repost count with respect to rolling time frame in URL (e.g. /trending/week -> window = rolling week)
        track_repost_counts_for_time = \
            get_repost_counts(session, False, True, not_deleted_track_ids, None, None, time)
        # Generate track_id --> windowed_save_count mapping
        track_repost_counts_for_time = {
            repost_item_id: repost_count
            for (repost_item_id, repost_count,
                 repost_type) in track_repost_counts_for_time
            if repost_type == RepostType.track
        }

        # Query the owner of each track (needed for follower counts)
        track_owners_query = (session.query(
            Track.track_id, Track.owner_id).filter(
                Track.is_current == True,
                Track.is_unlisted == False, Track.stem_of == None,
                Track.track_id.in_(not_deleted_track_ids)).all())

        # Generate track_id <-> owner_id mapping
        track_owner_dict = {
            track_id: owner_id
            for (track_id, owner_id) in track_owners_query
        }
        # Generate list of owner ids
        track_owner_list = [
            owner_id for (track_id, owner_id) in track_owners_query
        ]

        # build dict of owner_id --> follower_count
        follower_counts = (session.query(
            Follow.followee_user_id,
            func.count(Follow.followee_user_id)).filter(
                Follow.is_current == True, Follow.is_delete == False,
                Follow.followee_user_id.in_(track_owner_list)).group_by(
                    Follow.followee_user_id).all())
        follower_count_dict = \
                {user_id: follower_count for (user_id, follower_count) in follower_counts}

        # Query save counts
        save_counts = get_save_counts(session, False, True,
                                      not_deleted_track_ids, None)
        # Generate track_id --> save_count mapping
        track_save_counts = {
            save_item_id: save_count
            for (save_item_id, save_count, save_type) in save_counts
            if save_type == SaveType.track
        }

        # Query save counts with respect to rolling time frame in URL (e.g. /trending/week -> window = rolling week)
        save_counts_for_time = get_save_counts(session, False, True,
                                               not_deleted_track_ids, None,
                                               None, time)
        # Generate track_id --> windowed_save_count mapping
        track_save_counts_for_time = {
            save_item_id: save_count
            for (save_item_id, save_count, save_type) in save_counts_for_time
            if save_type == SaveType.track
        }

        trending_tracks = []
        for track_entry in listen_counts:
            # Skip over deleted tracks
            if track_entry[response_name_constants.
                           track_id] not in not_deleted_track_ids:
                continue

            # Populate repost counts
            if track_entry[
                    response_name_constants.track_id] in track_repost_counts:
                track_entry[response_name_constants.repost_count] = \
                        track_repost_counts[track_entry[response_name_constants.track_id]]
            else:
                track_entry[response_name_constants.repost_count] = 0

            # Populate repost counts with respect to time
            if track_entry[response_name_constants.
                           track_id] in track_repost_counts_for_time:
                track_entry[response_name_constants.windowed_repost_count] = \
                    track_repost_counts_for_time[track_entry[response_name_constants.track_id]]
            else:
                track_entry[response_name_constants.windowed_repost_count] = 0

            # Populate save counts
            if track_entry[
                    response_name_constants.track_id] in track_save_counts:
                track_entry[response_name_constants.save_count] = \
                        track_save_counts[track_entry[response_name_constants.track_id]]
            else:
                track_entry[response_name_constants.save_count] = 0

            # Populate save counts with respect to time
            if track_entry[response_name_constants.
                           track_id] in track_save_counts_for_time:
                track_entry[response_name_constants.windowed_save_count] = \
                        track_save_counts_for_time[track_entry[response_name_constants.track_id]]
            else:
                track_entry[response_name_constants.windowed_save_count] = 0

            # Populate track owner and follower count
            owner_id = track_owner_dict[track_entry[
                response_name_constants.track_id]]
            owner_follow_count = 0
            if owner_id in follower_count_dict:
                owner_follow_count = follower_count_dict[owner_id]
            track_entry[response_name_constants.track_owner_id] = owner_id
            track_entry[response_name_constants.
                        track_owner_follower_count] = owner_follow_count

            # Populate created at timestamps
            if track_entry[
                    response_name_constants.track_id] in track_created_at_dict:
                # datetime needs to be in isoformat for json.dumps() in `update_trending_cache()` to
                # properly process the dp response and add to redis cache
                # timespec = specifies additional components of the time to include
                track_entry[response_name_constants.created_at] = \
                        track_created_at_dict[track_entry[response_name_constants.track_id]] \
                            .isoformat(timespec='seconds')
            else:
                track_entry[response_name_constants.created_at] = None

            trending_tracks.append(track_entry)

    final_resp = {}
    final_resp['listen_counts'] = trending_tracks
    return final_resp
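The two plain-Python transforms above (the genre decode and the trackId rename) are easy to verify in isolation; a minimal sketch, where the helper name rename_track_ids is illustrative and not part of the service:

from urllib.parse import unquote

def rename_track_ids(listen_counts):
    # mirror the camelCase -> snake_case rename applied to the identity response
    for entry in listen_counts:
        entry['track_id'] = entry.pop('trackId')
    return listen_counts

print(unquote('Hip-Hop%2FRap'))                          # Hip-Hop/Rap
print(rename_track_ids([{'trackId': 7, 'listens': 3}]))  # [{'listens': 3, 'track_id': 7}]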
Example #42
0
    def test_configure_time(self):
        layer_name = 'boxes_with_end_date'
        # make sure it's not already there (or configured)
        cascading_delete(gs_catalog, layer_name)

        def get_wms_timepositions():
            alternate_name = 'geonode:%s' % layer_name
            if alternate_name in get_wms().contents:
                metadata = get_wms().contents[alternate_name]
                self.assertTrue(metadata is not None)
                return metadata.timepositions
            else:
                return None

        thefile = os.path.join(
            GOOD_DATA, 'time', '%s.shp' % layer_name
        )
        resp, data = self.client.upload_file(thefile)

        # initial state is no positions or info
        self.assertTrue(get_wms_timepositions() is None)
        self.assertEqual(resp.status_code, 200)

        # enable using interval and single attribute
        if not isinstance(data, string_types):
            self.wait_for_progress(data.get('progress'))
            self.assertTrue(data['success'])
            self.assertEqual(data['redirect_to'], upload_step('time'))
            redirect_to = data['redirect_to']
            resp, data = self.client.get_html(upload_step('time'))
            self.assertEqual(resp.status_code, 200)
            data = dict(csrfmiddlewaretoken=self.client.get_csrf_token(),
                        time_attribute='date',
                        time_end_attribute='enddate',
                        presentation_strategy='LIST',
                        )
            resp = self.client.make_request(redirect_to, data)
            self.assertEqual(resp.status_code, 200)
            resp_js = resp.json()
            if resp_js['success']:
                url = resp_js['redirect_to']

                resp = self.client.make_request(url, data)

                url = resp.json()['url']

                self.assertTrue(
                    url.endswith(layer_name),
                    'expected url to end with %s, but got %s' %
                    (layer_name,
                     url))
                self.assertEqual(resp.status_code, 200)

                url = unquote(url)
                self.check_layer_complete(url, layer_name)
                wms = get_wms(
                    type_name='geonode:%s' % layer_name, username=GEOSERVER_USER, password=GEOSERVER_PASSWD)
                layer_info = list(wms.items())[0][1]
                self.assertEqual(100, len(layer_info.timepositions))
            else:
                self.assertTrue('error_msg' in resp_js)
                self.assertTrue(
                    'Source SRS is not valid' in resp_js['error_msg'])
Example #43
0
 def GetItem(self, url, grandid=0, parent='', trytime=1):
     app_url = GetAppUrl(self.user)
     od_type = get_value('od_type', self.user)
     token = GetToken(user=self.user)
     InfoLogger().print_r(u'[start] getting files from url {}'.format(url))
     headers = {'Authorization': 'Bearer {}'.format(token)}
     headers.update(default_headers)
     try:
         self.CheckPathSize(url.replace('children?expand=thumbnails', ''))
         r = browser.get(url, headers=headers, timeout=10)
         data = json.loads(r.content)
         if data.get('error'):
             InfoLogger().print_r('error:{}! waiting 180s'.format(
                 data.get('error').get('message')))
             time.sleep(180)
             self.queue.put(
                 dict(url=url,
                      grandid=grandid,
                      parent=parent,
                      trytime=trytime))
             return
         values = data.get('value')
         if len(values) > 0:
             for value in values:
                 item = {}
                 if value.get('folder'):
                     folder = mon_db.items.find_one({
                         'id': value['id'],
                         'user': self.user
                     })
                     if folder is not None:
                          if folder['size_order'] == int(
                                  value['size']):  # folder size unchanged, skip update
                             InfoLogger().print_r(
                                 u'path:{},origin size:{},current size:{}--------no change'
                                 .format(value['name'],
                                         folder['size_order'],
                                         value['size']))
                         else:
                             mon_db.items.delete_one({'id': value['id']})
                             item['type'] = 'folder'
                             item['user'] = self.user
                             item['order'] = 0
                             item['name'] = convert2unicode(value['name'])
                             item['id'] = convert2unicode(value['id'])
                             item['size'] = humanize.naturalsize(
                                 value['size'], gnu=True)
                             item['size_order'] = int(value['size'])
                             item['lastModtime'] = date_to_char(
                                 parse(value['lastModifiedDateTime']))
                             item['grandid'] = grandid
                             item['parent'] = parent
                             grand_path = value.get('parentReference').get(
                                 'path').replace('/drive/root:', '')
                             if grand_path == '':
                                 path = convert2unicode(value['name'])
                             else:
                                 path = grand_path.replace(
                                     self.share_path, '',
                                     1) + '/' + convert2unicode(
                                         value['name'])
                             if path.startswith('/') and path != '/':
                                 path = path[1:]
                             if path == '':
                                 path = convert2unicode(value['name'])
                             path = urllib.unquote('{}:/{}'.format(
                                 self.user, path))
                             item['path'] = path
                              subfolder = mon_db.items.insert_one(item)
                             if value.get('folder').get('childCount') == 0:
                                 continue
                             else:
                                 parent_path = value.get(
                                     'parentReference').get('path').replace(
                                         '/drive/root:', '')
                                 path = convert2unicode(parent_path + '/' +
                                                        value['name'])
                                 # path=urllib.quote(convert2unicode(parent_path+'/'+value['name']))
                                 if od_type == 'nocn' or od_type is None or od_type == False:
                                     url = app_url + 'v1.0/me/drive/root:{}:/children?expand=thumbnails'.format(
                                         path)
                                 else:
                                     url = app_url + '_api/v2.0/me/drive/root:{}:/children?expand=thumbnails'.format(
                                         path)
                                 self.queue.put(
                                     dict(url=url,
                                          grandid=grandid + 1,
                                          parent=item['id'],
                                          trytime=1))
                     else:
                         mon_db.items.delete_one({'id': value['id']})
                         item['type'] = 'folder'
                         item['user'] = self.user
                         item['order'] = 0
                         item['name'] = convert2unicode(value['name'])
                         item['id'] = convert2unicode(value['id'])
                         item['size'] = humanize.naturalsize(value['size'],
                                                             gnu=True)
                         item['size_order'] = int(value['size'])
                         item['lastModtime'] = date_to_char(
                             parse(value['lastModifiedDateTime']))
                         item['grandid'] = grandid
                         item['parent'] = parent
                         grand_path = value.get('parentReference').get(
                             'path').replace('/drive/root:', '')
                         if grand_path == '':
                             path = convert2unicode(value['name'])
                         else:
                             path = grand_path.replace(
                                 self.share_path, '',
                                 1) + '/' + convert2unicode(value['name'])
                         if path.startswith('/') and path != '/':
                             path = path[1:]
                         if path == '':
                             path = convert2unicode(value['name'])
                         path = urllib.unquote('{}:/{}'.format(
                             self.user, path))
                         item['path'] = path
                          subfolder = mon_db.items.insert_one(item)
                         if value.get('folder').get('childCount') == 0:
                             continue
                         else:
                             parent_path = value.get('parentReference').get(
                                 'path').replace('/drive/root:', '')
                             path = convert2unicode(parent_path + '/' +
                                                    value['name'])
                             # path=urllib.quote(convert2unicode(parent_path+'/'+value['name']))
                             if od_type == 'nocn' or od_type is None or od_type == False:
                                 url = app_url + 'v1.0/me/drive/root:{}:/children?expand=thumbnails'.format(
                                     path)
                             else:
                                 url = app_url + '_api/v2.0/me/drive/root:{}:/children?expand=thumbnails'.format(
                                     path)
                             self.queue.put(
                                 dict(url=url,
                                      grandid=grandid + 1,
                                      parent=item['id'],
                                      trytime=1))
                 else:
                      if mon_db.items.find_one({'id': value['id']
                                                }) is not None:  # file already exists
                         continue
                     else:
                         item['type'] = GetExt(value['name'])
                         grand_path = value.get('parentReference').get(
                             'path').replace('/drive/root:', '')
                         if grand_path == '':
                             path = convert2unicode(value['name'])
                         else:
                             path = grand_path.replace(
                                 self.share_path, '',
                                 1) + '/' + convert2unicode(value['name'])
                         if path.startswith('/') and path != '/':
                             path = path[1:]
                         if path == '':
                             path = convert2unicode(value['name'])
                         path = urllib.unquote('{}:/{}'.format(
                             self.user, path))
                         item['path'] = path
                         item['user'] = self.user
                         item['name'] = convert2unicode(value['name'])
                         item['id'] = convert2unicode(value['id'])
                         item['size'] = humanize.naturalsize(value['size'],
                                                             gnu=True)
                         item['size_order'] = int(value['size'])
                         item['lastModtime'] = date_to_char(
                             parse(value['lastModifiedDateTime']))
                         item['grandid'] = grandid
                         item['parent'] = parent
                         if GetExt(value['name']) in [
                                 'bmp', 'jpg', 'jpeg', 'png', 'gif'
                         ]:
                             item['order'] = 3
                             key1 = 'name:{}'.format(value['id'])
                             key2 = 'path:{}'.format(value['id'])
                             redis_client.set(key1, value['name'])
                             redis_client.set(key2, path)
                         elif value['name'] == '.password':
                             item['order'] = 1
                         else:
                             item['order'] = 2
                         mon_db.items.insert_one(item)
         else:
             InfoLogger().print_r('{}\'s size is zero'.format(url))
         if data.get('@odata.nextLink'):
             self.queue.put(
                 dict(url=data.get('@odata.nextLink'),
                      grandid=grandid,
                      parent=parent,
                      trytime=1))
         InfoLogger().print_r(
             u'[success] getting files from url {}'.format(url))
     except Exception as e:
         exestr = traceback.format_exc()
         trytime += 1
         ErrorLogger().print_r(
              u'error to operate GetItem("{}","{}","{}"), try count: {}, reason: {}'
             .format(url, grandid, parent, trytime, exestr))
         if trytime <= 3:
             self.queue.put(
                 dict(url=url,
                      grandid=grandid,
                      parent=parent,
                      trytime=trytime))
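Note that this example is Python 2 (urllib.unquote returns a byte string there); on Python 3 the same path normalization would use urllib.parse.unquote, which returns str directly. A minimal sketch with an illustrative user and path:

from urllib.parse import unquote

path = unquote('{}:/{}'.format('alice', 'docs/%E6%96%87%E6%A1%A3'))
print(path)  # alice:/docs/文档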
Example #44
0
def unquote_raw(value):
    return unquote(value)
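For reference, unquote decodes %XX escapes (UTF-8 by default on Python 3) but, unlike unquote_plus, leaves '+' untouched:

from urllib.parse import unquote, unquote_plus

assert unquote('caf%C3%A9%20au%20lait') == 'café au lait'
assert unquote('a+b') == 'a+b'
assert unquote_plus('a+b') == 'a b'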
Example #45
0
 def unescaped_identifier(self):
     return unquote(self.identifier).replace('_', ' ')
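So an identifier stored percent-encoded, with underscores standing in for spaces, round-trips to a display name; a small standalone check (the surrounding class is assumed):

from urllib.parse import unquote

identifier = 'New%20York_City'
assert unquote(identifier).replace('_', ' ') == 'New York City'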
Example #46
0
import os
import urllib.request
from urllib.parse import unquote

from bs4 import BeautifulSoup

print("이 스크립트는 크롤링을 통해 롤 챔피언 이미지를 모두 다운로드합니다.")
print("2019-07-25 기준의 웹 페이지에 맞춰 스크립트가 작성되어 있습니다.")
print("\nConnecting to wiki..")
res = urllib.request.urlopen(
    "https://leagueoflegends.fandom.com/wiki/Champion")
soup = BeautifulSoup(res.read().decode(), "html.parser")

download_dir = "champions"
if not os.path.exists(download_dir):
    os.mkdir(download_dir)

ol = soup.select_one(".champion_roster")
imgs = list(filter(lambda x: ".png" in x["src"], ol.select("img")))
for n, img in enumerate(imgs):
    url = img["src"].split(".png")[0] + ".png"
    image_name = unquote(
        url.split("/")[-1].replace("_OriginalCircle", "").replace("_", " "))
    urllib.request.urlretrieve(url, os.path.join(download_dir, image_name))
    print(img["src"].split(".png")[0] + ".png", "==>", image_name,
          "(%d/%d)" % (n + 1, len(imgs)))

print("\n[NOTICE]")
print("THE IMAGES ARE COPYRIGHTED TO RIOT GAMES INC.")
print(
    "HOWEVER: Riot Games allows use of their League of Legends intellectual property when meeting the conditions lined in their Legal Jibber-Jabber policy."
)
print("\nComplete!")
print("Images were saved to the", download_dir, "directory.")
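The unquote call is what turns percent-encoded wiki filenames back into readable ones; for example (URL shortened for illustration):

from urllib.parse import unquote

src = "https://example.org/Kai%27Sa_OriginalCircle.png"
url = src.split(".png")[0] + ".png"
name = unquote(url.split("/")[-1].replace("_OriginalCircle", "").replace("_", " "))
print(name)  # Kai'Sa.png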
Example #47
0
    def to_python(self, value):
        # type: (str, ) -> str
        # decode once, as the WSGI app already receives a decoded url
        return unquote(value)
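The "decode once" comment matters: unquote is not idempotent, so decoding an already-decoded value can corrupt it. For example:

from urllib.parse import unquote

raw = '50%2525'
once = unquote(raw)    # '50%25'  -- the correct value
twice = unquote(once)  # '50%'    -- double decoding changes the data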
Example #48
0
def AddResource(data, user=GetConfig('default_pan')):
    # Check whether the parent folder is already in the database; if not, fetch and add it
    grand_path = data.get('parentReference').get('path').replace(
        '/drive/root:', '')  # empty string or '/path'
    try:
        grand_path = urllib.unquote(grand_path.encode('utf-8')).decode('utf-8')
    except:
        grand_path = grand_path
    if grand_path == '':
        parent_id = ''
    else:
        g = GetItemThread(Queue(), user)
        parent_id = data.get('parentReference').get('id')
        if grand_path.startswith('/'):
            grand_path = grand_path[1:]
        if grand_path != '':
            parent_path = '/'
            pid = ''
            for idx, p in enumerate(grand_path.split('/')):
                parent = mon_db.items.find_one({
                    'name': p,
                    'grandid': idx,
                    'parent': pid,
                    'user': user
                })
                InfoLogger().print_r(
                    '[*AddResource] check parent path exists? user: {},name:{} ,parent id:{}; exists:{}'
                    .format(user, p, pid, parent is not None))
                if parent is not None:
                    pid = parent['id']
                    parent_path = '/'.join([parent_path, parent['name']])
                else:
                    parent_path = ('/'.join([parent_path,
                                             p])).replace('//', '/')
                    fdata = g.GetItemByPath(parent_path)
                    path = user + ':/' + parent_path.replace('///', '/')
                    path = path.replace('///', '/').replace('//', '/')
                    path = urllib.unquote(path).decode('utf-8')
                    InfoLogger().print_r(
                        '[*AddResource] parent path:{} is not exists; Add data in mongo:{}'
                        .format(parent_path, path))
                    item = {}
                    item['type'] = 'folder'
                    item['user'] = user
                    item['order'] = 0
                    item['name'] = fdata.get('name')
                    item['id'] = fdata.get('id')
                    item['size'] = humanize.naturalsize(fdata.get('size'),
                                                        gnu=True)
                    item['size_order'] = fdata.get('size')
                    item['lastModtime'] = date_to_char(
                        parse(fdata['lastModifiedDateTime']))
                    item['grandid'] = idx
                    item['parent'] = pid
                    item['path'] = path
                    mon_db.items.insert_one(item)
                    pid = fdata.get('id')
    # Insert the item record
    item = {}
    item['type'] = GetExt(data.get('name'))
    item['name'] = data.get('name')
    item['user'] = user
    item['id'] = data.get('id')
    item['size'] = humanize.naturalsize(data.get('size'), gnu=True)
    item['size_order'] = data.get('size')
    item['lastModtime'] = date_to_char(parse(data.get('lastModifiedDateTime')))
    item['parent'] = parent_id
    if grand_path == '':
        path = user + ':/' + convert2unicode(data['name'])
    else:
        path = user + ':/' + grand_path + '/' + convert2unicode(data['name'])
    path = path.replace('//', '/')
    path = urllib.unquote(path).decode('utf-8')
    grandid = len(path.split('/')) - 2
    item['grandid'] = grandid
    item['path'] = path
    InfoLogger().print_r('AddResource: name:{};path:{};grandid:{}'.format(
        data.get('name'), path, grandid))
    if GetExt(data['name']) in ['bmp', 'jpg', 'jpeg', 'png', 'gif']:
        item['order'] = 3
    elif data['name'] == '.password':
        item['order'] = 1
    else:
        item['order'] = 2
    mon_db.items.insert_one(item)
Example #49
0
    def get_episodes(self, program=None, season=None, episodes=None, category=None, feature=None, programtype=None, keywords=None,
                     whatson_id=None, video_id=None, video_url=None, page=None, use_favorites=False, variety=None, cache_file=None):
        """Get episodes or season data from VRT NU Search API"""

        # Construct params
        if page:
            page = realpage(page)
            all_items = False
            items_per_page = get_setting_int('itemsperpage', default=50)
            params = {
                'from': ((page - 1) * items_per_page) + 1,
                'i': 'video',
                'size': items_per_page,
            }
        elif variety == 'single':
            all_items = False
            params = {
                'i': 'video',
                'size': '1',
            }
        else:
            all_items = True
            params = {
                'i': 'video',
                'size': '300',
            }

        if variety:
            season = 'allseasons'

            if variety == 'offline':
                from datetime import datetime
                import dateutil.tz
                params['facets[assetOffTime]'] = datetime.now(dateutil.tz.gettz('Europe/Brussels')).strftime('%Y-%m-%d')

            if variety == 'oneoff':
                params['facets[episodeNumber]'] = '[0,1]'  # This to avoid VRT NU metadata errors (see #670)
                params['facets[programType]'] = 'oneoff'

            if variety == 'watchlater':
                self._resumepoints.refresh(ttl=ttl('direct'))
                episode_urls = self._resumepoints.watchlater_urls()
                params['facets[url]'] = '[%s]' % (','.join(episode_urls))

            if variety == 'continue':
                self._resumepoints.refresh(ttl=ttl('direct'))
                episode_urls = self._resumepoints.resumepoints_urls()
                params['facets[url]'] = '[%s]' % (','.join(episode_urls))

            if use_favorites:
                program_urls = [program_to_url(p, 'medium') for p in self._favorites.programs()]
                params['facets[programUrl]'] = '[%s]' % (','.join(program_urls))
            elif variety in ('offline', 'recent'):
                channel_filter = [channel.get('name') for channel in CHANNELS if get_setting_bool(channel.get('name'), default=True)]
                params['facets[programBrands]'] = '[%s]' % (','.join(channel_filter))

        if program:
            params['facets[programUrl]'] = program_to_url(program, 'medium')

        if season and season != 'allseasons':
            params['facets[seasonTitle]'] = season

        if episodes:
            params['facets[episodeNumber]'] = '[%s]' % (','.join(str(episode) for episode in episodes))

        if category:
            params['facets[categories]'] = category

        if feature:
            params['facets[programTags.title]'] = feature

        if programtype:
            params['facets[programType]'] = programtype

        if keywords:
            if not season:
                season = 'allseasons'
            params['q'] = quote_plus(from_unicode(keywords))
            params['highlight'] = 'true'

        if whatson_id:
            params['facets[whatsonId]'] = whatson_id

        if video_id:
            params['facets[videoId]'] = video_id

        if video_url:
            params['facets[url]'] = video_url

        # Construct the VRT NU Search API URL and fetch the API data
        querystring = '&'.join('{}={}'.format(key, value) for key, value in list(params.items()))
        search_url = self._VRTNU_SEARCH_URL + '?' + querystring.replace(' ', '%20')  # Only encode spaces to minimize url length
        if cache_file:
            search_json = get_cached_url_json(url=search_url, cache=cache_file, ttl=ttl('indirect'), fail={})
        else:
            search_json = get_url_json(url=search_url, fail={})

        # Check for multiple seasons
        seasons = []
        if 'facets[seasonTitle]' not in unquote(search_url):
            facets = search_json.get('facets', {}).get('facets')
            if facets:
                seasons = next((f.get('buckets', []) for f in facets if f.get('name') == 'seasons' and len(f.get('buckets', [])) > 1), None)

        episodes = search_json.get('results', [{}])
        show_seasons = bool(season != 'allseasons')

        # Return seasons
        if show_seasons and seasons:
            return (seasons, episodes)

        api_pages = search_json.get('meta').get('pages').get('total')
        api_page_size = search_json.get('meta').get('pages').get('size')
        total_results = search_json.get('meta').get('total_results')

        if all_items and total_results > api_page_size:
            for api_page in range(1, api_pages):
                api_page_url = search_url + '&from=' + str(api_page * api_page_size + 1)
                api_page_json = get_url_json(api_page_url)
                if api_page_json is not None:
                    episodes += api_page_json.get('results', [{}])

        # Return episodes
        return episodes
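The query string here is assembled by hand rather than with urlencode, so only the keyword search term is escaped (via quote_plus) and remaining spaces are encoded afterwards. A minimal sketch of that approach, with an illustrative base URL:

from urllib.parse import quote_plus, unquote

params = {'i': 'video', 'size': '300', 'q': quote_plus('het journaal')}
querystring = '&'.join('{}={}'.format(k, v) for k, v in params.items())
search_url = 'https://vrtnu-api.vrt.be/search?' + querystring.replace(' ', '%20')
assert 'facets[seasonTitle]' not in unquote(search_url)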
Example #50
0
def apply_descrambler(stream_data: Dict, key: str) -> None:
    """Apply various in-place transforms to YouTube's media stream data.

    Creates a ``list`` of dictionaries by string splitting on commas, then
    taking each list item, parsing it as a query string, converting it to a
    ``dict`` and unquoting the value.

    :param dict stream_data:
        Dictionary containing query string encoded values.
    :param str key:
        Name of the key in dictionary.

    **Example**:

    >>> d = {'foo': 'bar=1&var=test,em=5&t=url%20encoded'}
    >>> apply_descrambler(d, 'foo')
    >>> print(d)
    {'foo': [{'bar': '1', 'var': 'test'}, {'em': '5', 't': 'url encoded'}]}

    """
    otf_type = "FORMAT_STREAM_TYPE_OTF"

    if key == "url_encoded_fmt_stream_map" and not stream_data.get(
            "url_encoded_fmt_stream_map"):
        formats = json.loads(
            stream_data["player_response"])["streamingData"]["formats"]
        formats.extend(
            json.loads(stream_data["player_response"])["streamingData"]
            ["adaptiveFormats"])
        try:
            stream_data[key] = [{
                "url":
                format_item["url"],
                "type":
                format_item["mimeType"],
                "quality":
                format_item["quality"],
                "itag":
                format_item["itag"],
                "bitrate":
                format_item.get("bitrate"),
                "is_otf": (format_item.get("type") == otf_type),
            } for format_item in formats]
        except KeyError:
            cipher_url = [
                parse_qs(formats[i]["signatureCipher"])
                for i, data in enumerate(formats)
                if "signatureCipher" in formats[i]
            ]
            stream_data[key] = [{
                "url":
                cipher_url[i]["url"][0],
                "s":
                cipher_url[i]["s"][0],
                "type":
                format_item["mimeType"],
                "quality":
                format_item["quality"],
                "itag":
                format_item["itag"],
                "bitrate":
                format_item.get("bitrate"),
                "is_otf": (format_item.get("type") == otf_type),
            } for i, format_item in enumerate(formats)]
    else:
        stream_data[key] = [{k: unquote(v)
                             for k, v in parse_qsl(i)}
                            for i in stream_data[key].split(",")]

    logger.debug("applying descrambler")
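The fallback branch at the end is the descrambling described in the docstring; run standalone it behaves like this:

from urllib.parse import parse_qsl, unquote

stream_data = {'foo': 'bar=1&var=test,em=5&t=url%20encoded'}
key = 'foo'
stream_data[key] = [{k: unquote(v) for k, v in parse_qsl(i)}
                    for i in stream_data[key].split(",")]
print(stream_data)
# {'foo': [{'bar': '1', 'var': 'test'}, {'em': '5', 't': 'url encoded'}]}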
Example #51
0
 def to_python(self, value):
     return unquote(value)
Example #52
0
 def put(self, filename):
     filename = unquote(filename)
     mtype = self.request.headers.get("Content-Type")
     logging.info('PUT "%s" "%s" %d bytes', filename, mtype,
                  self.bytes_read)
     self.write("OK")
Example #53
0
 def version(self):
     if 'version' in self.kwargs:
         return unquote(self.kwargs['version'])
     return None
Example #54
0
def application(env):
    """
CONTENT_ENCODING = <class 'str'> gzip
CONTENT_LENGTH = <class 'int'> 5421
CONTENT_TYPE = <class 'str'> application/json
HTTP_KWARGS = <class 'dict'> {}
HTTP_METHOD = <class 'str'> POST
HTTP_PARAMS = <class 'list'> []
REMOTE_ADDR = <class 'str'> 79.104.1.86
REMOTE_PORT = <class 'str'> 65083
ROOT = <class 'str'> /usr/share/nginx/html
SCGI = <class 'str'> 1
SERVER_NAME = <class 'str'> online365.pro
SERVER_PORT = <class 'str'> 443
URI = <class 'str'> /sgg/
X-API-KEY = <class 'str'> 80a3fd3ba997493f837894f1af803216
X-BODY-FILE = <class 'str'> /usr/share/nginx/temp/0000000005
scgi.defer = <class 'NoneType'> None
scgi.initv = <class 'list'> [('127.0.0.1', 50703), 6113]
scgi.rfile = <class '_io.BufferedReader'> <_io.BufferedReader name=5>
scgi.wfile = <class 'socket.SocketIO'> <socket.SocketIO object at 0x7f90edc44240>
"""
    import zlib
    import urllib.parse as urllib2
    from io import BytesIO as StringIO
    from ean13.ean2pdfV2 import main2
    from ean13.ean2odsV2 import toods

    def saveTxt2Pdf(cp, text, method=None):
        try:
            output = StringIO()
            rows = text.splitlines()
            main2(__appname__, rows, output, method)
            data = output.getvalue()
            return data
        except:
            log(traceback.format_exc(), kind="error")
        finally:
            output.close()

    def saveTxt2Ods(cp, text, method=None):
        try:
            output = StringIO()
            rows = text.splitlines()
            toods(__appname__, rows, output, method)
            data = output.getvalue()
            return data
        except:
            log(traceback.format_exc(), kind="error")
        finally:
            output.close()

    addr, pid = env["scgi.initv"][:2]
    msg = f'{addr[0]} {addr[1]} {env["HTTP_METHOD"]} {env["URI"]} {env["HTTP_PARAMS"]} {env["HTTP_KWARGS"]}'
    env["scgi.defer"] = lambda: log("%s close" % msg)
    log(msg)
    uri = env["URI"].lower()
    qw = uri.split('/ean13')
    method = qw[-1]
    cp = 'utf8'
    q = None
    _rm = env["HTTP_METHOD"].upper()

    #env["HTTP_PARAMS"] = u'1251&Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90'
    #env["HTTP_PARAMS"] = 'ods&Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90'
    #env["HTTP_PARAMS"] = 'Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90'
    #env["HTTP_PARAMS"] = 'Аптека низких цен!~Мезим фортеТовар мазь крем для рук Мезим форте;1007290004649;1;123.45&Товар2;1007290004977;2;12223.45'

    try:
        _qs = '&'.join(env["HTTP_PARAMS"])
    except:
        _qs = ''
    fgOds = _qs.find('ods') > -1
    LastModified = time.strftime('%a, %d %b %Y %X GMT', time.gmtime())
    content = ''
    header = []

    #print(_rm)
    if 'HEAD' == _rm:
        header = head(LastModified, len(content), True)
    elif 'POST' == _rm:
        q = env['scgi.rfile'].read(env['CONTENT_LENGTH'])
        try:
            q = zlib.decompress(q)
        except:
            pass
        if q:
            try:
                _cp = _qs[:4]
                if '1251' == _cp:
                    cp = _cp
                q = q.decode(cp)
            except:
                pass

            fgOds_p = q.find('ods') > -1
            log(q, kind='REQUEST')
            if fgOds or fgOds_p:
                content = saveTxt2Ods(cp, q, method)
                header = head(LastModified, len(content), False, fgOds)
                #with open('11.ods', 'wb') as ff:
                #ff.write(content)
            else:
                ww = saveTxt2Pdf(cp, q, method)
                content = zlib.compress(ww)
                header = head(LastModified, len(content), True, fgOds)
    elif 'GET' == _rm:
        try:
            _cp = _qs[:4]
            if '1251' == _cp:
                cp = _cp
                q = urllib2.unquote(_qs[4:].replace(';',
                                                    '\t').replace('&', '\n'))
            else:
                q = urllib2.unquote(_qs.replace(';', '\t').replace('&', '\n'))
            try:
                q = q.decode(cp)
            except:
                pass
        except:
            q = ''
        if q:
            log(q, kind='REQUEST')
            if fgOds:
                content = saveTxt2Ods(cp, q, method)
                header = head(LastModified, len(content), False, fgOds)
            else:
                ww = saveTxt2Pdf(cp, q, method)
                content = zlib.compress(ww)
                header = head(LastModified, len(content), True, fgOds)
        else:
            header = [("Last-Modified", "%s" % LastModified),
                      ("Cache-Control", "no-cache"),
                      ("Content-Type", "text/plain; charset=UTF-8"),
                      ("X-Accel-Buffering", "no")]
            content = u"""Манускрипт.Онлайн

Сервис EAN13 формирует pdf или ods файл пригодный для печати этикеток штрих-кодов формата 30x20.

Пример использования (метод GET):
для обычных ценников:
  https://online365.pro/ean13?1251&ods&Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90
для увеличенного шрифта:
  https://online365.pro/ean13big?1251&ods&Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90
для увиличенного жирного шрифта:
  https://online365.pro/ean13bigbold?1251&ods&Товар1;1007290004649;1;123.45&Товар2;1007290004977;2;678,90
где
  1251          - не обязательный параметр указывающий в какой кодировке передаются данные,
                  если отсутствует, то данные должны быть в utf8
  ods           - не обязательный параметр указывающий в каком формате возвращать данные,
                  если отсутствует, то данные вернутся в pdf
  Товар1        - имя товара до 105 символов
                  товар можно передать с доп. информацией, например,
                  Текст1~Товар1
                  2|Текст1~Товар1
                  где 2| количество повторений доп. информации
  1007290004649 - штрих-код товара
  1             - кол-во повторений штрих-кода для данного товара
  123.45        - цена товара, в цене допускается разделитель "." или ","
                  цену можно передать с доп. информацией, например,
                  Текст1~123.45~txt2~txt3
                  00031~123.45~29.06~15

Пример использования (метод POST):
  https://online365.pro/ean13 - для обычных ценников
  https://online365.pro/ean13big - для увеличенного шрифта
  https://online365.pro/ean13bigbold - для увиличенного жирного шрифта

  данные передаются в "текстовом формате резделитель табуляция", метод сжатия - deflate
  Товар1 <tab> 1007290004649 <tab> 1 <tab> 123.45 <newline>
  Товар2 <tab> 1007290004977 <tab> 2 <tab> 678,90 <newline>
""".encode('utf-8')

    # three mandatory yield calls: status, headers, content
    yield '200 OK'
    yield header
    yield content
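In the GET branch the query string itself carries the label data: ';' separates fields, '&' separates rows, and unquote restores the percent-encoded text. The same transform standalone:

import urllib.parse as urllib2

qs = 'Item%201;1007290004649;1;123.45&Item%202;1007290004977;2;678,90'
text = urllib2.unquote(qs.replace(';', '\t').replace('&', '\n'))
print(text)
# Item 1	1007290004649	1	123.45
# Item 2	1007290004977	2	678,90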
Example #55
0
def get_urls():
    output = check_output('brotab_client.py list_tabs | cut -f3', shell=True)
    output = output.split()
    return [unquote(line.decode('utf8').strip()) for line in output]
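The byte lines from the subprocess are decoded before unquoting; the same transform on a canned line:

from urllib.parse import unquote

line = b'https://example.com/a%20page\n'
assert unquote(line.decode('utf8').strip()) == 'https://example.com/a page'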
Example #56
0
def clean(name, text):
    page = etree.HTML(text)
    top_tag = page.xpath('//div[@class="mw-parser-output"]')[0]

    # Remove loads of unnecessary html elements
    for el in page.xpath(
            '//meta | //script | //style | //noscript |'
            ' //span[@class="mw-editsection"] |'
            ' //table[contains(@class,"infobox")] |'
            ' //table[contains(@class,"navbox")] |'
            ' //div[contains(@class,"metadata")] |'
            ' //div[@id="toc"] |'
            ' //div[contains(@class,"noprint")] |'
            ' //table[contains(@style,"background-color: #f9f9f9")] |'
            ' //comment() |'
            ' //table[contains(@class,"ambox")]'):
        # print(el)
        el.getparent().remove(el)

    # convert references to simple <referens>1</referens>
    for ref in page.xpath('//sup[@class="reference"]'):
        refnr = ""
        for e in ref.iter(tag='span'):
            if e.tail:
                refnr += e.tail
        [ref.remove(el) for el in ref]
        ref.text = refnr
        ref.tag = 'referens'
        ref.attrib.clear()

    # convert h1, h2 ... to rubrik1, rubrik2 ...
    for n in range(1, 6):
        for rub in page.xpath(f'//h{n}'):
            if len(rub):
                title = rub[-1].attrib.get('id', '???????').replace('_', ' ')
                rub.text = title
            [rub.remove(el) for el in rub]
            rub.tag = f'rubrik{n}'

    # Renaming simple elements
    for el in top_tag.xpath('./p'):
        el.tag = 'stycke'

    # Renaming simple elements
    for el in top_tag.xpath('.//ul'):
        el.tag = 'lista'

    # Renaming simple elements
    for el in top_tag.xpath('.//li'):
        el.tag = 'listpunkt'

    # Remove some unnecessary elements (TODO: move to the general removal loop)
    for el in page[1][1:]:
        # print(el.attrib)
        page[1].remove(el)


#   toc = page.xpath('//div[@id="toc"]')[0]
#   toctext = []
#   for el in toc.iter():
#       if el.text:
#           toctext.append(get_text(el))
#   [toc.remove(el) for el in toc]
#   for i, a in enumerate(toctext):
#       try:
#           print(a)
#           nr = float(a)
#           section = etree.Element('rubrik')
#           section.text = f"{a}. {toctext[i + 1]}"
#           toc.append(section)
#       except ValueError:
#           print("Not floating")
#   print("TOC --- ", toc, len(toc))
#   for el in page.xpath('//div[@class="thumbcaption"]'):
#       el.tag = 'bildtext'
#       el.attrib.clear()
#       link_to_text(el[0])

# Convert images to <bild> + <bildtext>
    image_urls = set()
    for el in top_tag.xpath('.//img'):
        parent = el.getparent()

        if parent is None or parent.getparent() is None:
            print("Problem reading parernt of image tag")

        # do while loop
        while parent.getparent() is not None and parent.getparent(
        ) is not top_tag:
            parent = parent.getparent()
        if parent.getparent() is None:
            top_tag.insert(-2, parent)
        idx = top_tag.index(parent)
        parent.tag = 'bild'
        for img_desc in parent.xpath('.//div[@class="thumbcaption"]'):
            img_desc.tag = 'bildtext'
            img_desc.attrib.clear()
            link_to_text(img_desc[0])
            # Add img_desc after <bild> as child of top element
            top_tag.insert(idx + 1, img_desc)
        parent.attrib.clear()
        image_urls.add(el.attrib['src'])
        # print(f"Added url: {el.attrib['src']}")
        # add_image_to_list(el.attrib['src'])
        parent.attrib['href'] = unquote(
            f"file://bilder/{el.attrib['src'].split('/')[-1]}")
        el.getparent().remove(el)
        for child in parent:
            parent.remove(child)

    #  file with list of all images

    # convert all other <a> tags to normal text
    for el in page.xpath('//a'):
        link_to_text(el)

    # Clean up reference list
    for el in top_tag.xpath('.//ol[@class="references"]'):
        for ref_el in el:
            nr = ref_el.attrib['id'][-1]
            ref_el.attrib.clear()
            link_to_text(ref_el[0])
            ref_el.text = f"{nr}.{ref_el.text}"
        el.tag = 'referenslista'
        el.attrib.clear()

    # Add top level title and change top level tag to 'artikel'
    top_tag.tag = 'artikel'
    top_tag.attrib.clear()
    top_tag.attrib['titel'] = name
    title_tag = etree.Element('rubrik1')
    title_tag.text = name
    top_tag.insert(0, title_tag)
    # print(f"LENGTH: {len(image_urls)}")
    return top_tag, image_urls
Example #57
0
    def apply(self):
        env = self.document.settings.env
        for node in self.document.traverse(docutils.nodes.reference):
            uri = node.get('refuri', '')
            if not uri:
                continue  # No URI (e.g. named reference)
            elif '://' in uri:
                continue  # Not a local link
            elif uri.startswith('#') or uri.startswith('mailto:'):
                continue  # Nothing to be done

            unquoted_uri = unquote(uri)
            for suffix in env.config.source_suffix:
                if unquoted_uri.lower().endswith(suffix.lower()):
                    target = unquoted_uri[:-len(suffix)]
                    break
            else:
                target = ''

            subsection_matches = self._subsection_re.match(uri)
            if target:
                target_ext = ''
                reftype = 'doc'
                refdomain = None
            elif subsection_matches:
                target = subsection_matches.group(1)
                target_ext = subsection_matches.group(2)
                reftype = 'ref'
                refdomain = 'std'
            else:
                file = os.path.normpath(
                    os.path.join(os.path.dirname(env.docname), unquoted_uri))
                if not os.path.isfile(os.path.join(env.srcdir, file)):
                    env.app.warn('file not found: {!r}'.format(file),
                                 env.doc2path(env.docname))
                    continue  # Link is ignored
                elif file.startswith('..'):
                    env.app.warn(
                        'link outside of source directory: {!r}'.format(file),
                        env.doc2path(env.docname))
                    continue  # Link is ignored
                if not hasattr(env, 'nbsphinx_files'):
                    env.nbsphinx_files = {}
                env.nbsphinx_files.setdefault(env.docname, []).append(file)
                continue  # We're done here

            target_docname = os.path.normpath(
                os.path.join(os.path.dirname(env.docname), target))
            if target_docname in env.found_docs:
                if target_ext:
                    target = target_docname + target_ext
                    target = target.lower()
                target = '/' + target
                linktext = node.astext()
                xref = sphinx.addnodes.pending_xref(reftype=reftype,
                                                    reftarget=target,
                                                    refdomain=refdomain,
                                                    refwarn=True,
                                                    refexplicit=True,
                                                    refdoc=env.docname)
                xref += docutils.nodes.Text(linktext, linktext)
                node.replace_self(xref)
Example #58
0
def http_interfaceListCheck(request):
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        addUserLog(request, "单接口管理->查看用例->获取数据->页面参数不合法", "FAIL")
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))

    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        addUserLog(request, "单接口管理->查看用例->获取数据->SQL注入检测时发现查询条件非法", "FAIL")
        return HttpResponse("<script>alert('查询条件非法');</script>")

    # Decide which table to read from based on the version (added by Wang Jiliang on 2018-02-24)
    if VersionService.isCurrentVersion(request):
        tbName = "tb_http_interface"
        versionCondition = ""
    else:
        tbName = "tb_version_http_interface"
        versionCondition = "and versionName='%s'" % request.session.get(
            "version")

    execSql = "SELECT i.*,u.userName,mu.userName modByName from %s i LEFT JOIN tb_user mu ON i.modBy = mu.loginName LEFT JOIN tb_user u ON i.addBy = u.loginName LEFT JOIN  tb_modules m ON i.moduleId = m.id LEFT JOIN tb_business_line b ON i.businessLineId = b.id WHERE 1=1 and i.state=1 %s" % (
        tbName, versionCondition)
    checkList = []
    for key in checkArr:
        if checkArr[key] == "":
            continue
        elif key == "caseFounder":
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (i.addBy LIKE %s or u.userName LIKE %s) """
            continue
        elif key == "module":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and m.moduleName LIKE %s """
            continue
        elif key == "businessLine":
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and b.bussinessLineName LIKE %s """
            continue
        elif key == "uri":
            checkList.append("%s" % checkArr[key])
            execSql += """ and i.uri= %s """
            continue
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and i.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql,
                         attrList=checkList,
                         page=page,
                         pageNum=commonWebConfig.interFacePageNum,
                         request=request)

    response = render(
        request,
        "InterfaceTest/HTTPInterface/SubPages/HTTP_interface_list_check_page.html",
        context)
    addUserLog(request, "单接口管理->查看用例->获取数据->成功", "PASS")
    return response
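The checkArr round trip is the interesting unquote use here: the client sends a percent-encoded JSON object, which is decoded and parsed in one step. For example:

import json
from urllib import parse

encoded = '%7B%22uri%22%3A%20%22/api/v1%22%7D'
check_arr = json.loads(parse.unquote(encoded))
assert check_arr == {'uri': '/api/v1'}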
Example #59
0
 def unquote_quote(segment):
     segment = unquote(segment)
     # Tilde is part of RFC3986 Unreserved Characters
     # https://tools.ietf.org/html/rfc3986#section-2.3
     # See also https://bugs.python.org/issue16285
     return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~")
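This unquote-then-quote pass normalizes a URL segment to a canonical percent-encoding and is stable when applied repeatedly. A standalone check, with the two RFC 3986 delimiter constants reproduced by hand:

from urllib.parse import quote, unquote

RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="

def unquote_quote(segment):
    segment = unquote(segment)
    return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~")

assert unquote_quote('caf%C3%A9') == 'caf%C3%A9'            # already canonical
assert unquote_quote(unquote_quote('a b c')) == 'a%20b%20c' # idempotent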
Example #60
0
        'format', 'version', 'mime', 'basis', 'warning'
    ]
    csvWriter.writerow(header)
    with open(infile, 'r', encoding='utf8') as f2:
        csvReader = csv.reader(f2)
        next(csvReader)
        for row in csvReader:
            counter += 1
            print('\rWorking on row %d' % counter, end='')

            if 'zip:file:' in row[2]:
                filename = row[2].split('zip:file:/', 1)[1].replace(
                    '.zip!', '.zip#').replace('/', '\\')
            else:
                filename = row[2].split('file:/', 1)[1]
            filename = unquote(filename)
            filesize = row[7]
            modified = row[10]
            errors = ''
            namespace = 'pronom'
            if row[14] == "":
                id = 'UNKNOWN'
            else:
                id = row[14]
            format = row[16]
            version = row[17]
            mime = row[15]
            basis = ''
            if row[11].lower() == 'true':
                warning = 'extension mismatch'
            else: