Example #1
def build_url_with_next(redirect_url, next_url):
    resolved_url = resolve_url(redirect_url)
    login_url_parts = list(urlparse(resolved_url))
    querystring = QueryDict(login_url_parts[4], mutable=True)
    querystring[settings.SSO_PROXY_REDIRECT_FIELD_NAME] = next_url
    login_url_parts[4] = querystring.urlencode(safe='/')
    return urlunparse(login_url_parts)
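Every snippet on this page follows the same basic pattern: urlparse the URL into its six components, rewrite index 4 (the query string), and reassemble with urlunparse. Below is a minimal standalone sketch of that pattern without the Django-specific QueryDict, resolve_url and settings used above; set_query_param and the URLs are illustrative names only.

from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

def set_query_param(url, name, value):
    # Split into (scheme, netloc, path, params, query, fragment).
    parts = list(urlparse(url))
    # Index 4 is the query string; rebuild it with the new parameter.
    query = dict(parse_qsl(parts[4]))
    query[name] = value
    parts[4] = urlencode(query)
    return urlunparse(parts)

# set_query_param('/login/?theme=dark', 'next', '/dashboard/')
#   -> '/login/?theme=dark&next=%2Fdashboard%2F'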
Example #2
File: utils.py | Project: melody40/monorepo
def purge_urls_from_cache(urls, backend_settings=None, backends=None):
    # Convert each URL into one URL per managed language (WAGTAILFRONTENDCACHE_LANGUAGES setting).
    # The managed languages are common to all the defined backends.
    # This depends on settings.USE_I18N.
    languages = getattr(settings, 'WAGTAILFRONTENDCACHE_LANGUAGES', [])
    if settings.USE_I18N and languages:
        langs_regex = "^/(%s)/" % "|".join(languages)
        new_urls = []

        # Purge the given url for each managed language
        for isocode, description in languages:
            for url in urls:
                up = urlparse(url)
                new_url = urlunparse(
                    (up.scheme, up.netloc,
                     re.sub(langs_regex, "/%s/" % isocode,
                            up.path), up.params, up.query, up.fragment))

                # Skip duplicates. re.sub finds no match when i18n_patterns was not used
                # in urls.py to serve content for different languages from different URLs.
                if new_url in new_urls:
                    continue

                new_urls.append(new_url)

        urls = new_urls

    for backend_name, backend in get_backends(backend_settings,
                                              backends).items():
        for url in urls:
            logger.info("[%s] Purging URL: %s", backend_name, url)

        backend.purge_batch(urls)
Example #3
def build_html_iframe(response, url_params=None, iframe_attrs=None):
    html = response.get('html', '')

    if url_params is None:
        url_params = {}

    if iframe_attrs is None:
        iframe_attrs = {}

    if html:
        # What follows is a pretty nasty looking "hack":
        # oEmbed has not implemented some parameters,
        # so for these we need to add them manually to the iframe.
        html = BeautifulSoup(html).iframe

        data_url = response.get('player_url', html['src'])
        player_url = urlparse(data_url)

        queries = parse_qs(player_url.query)
        url_params.update(queries)

        url_parts = list(player_url)
        url_parts[4] = urlencode(url_params, True)

        html['src'] = urlunparse(url_parts)

        for key, value in iframe_attrs.items():
            if value:
                html[key] = value
    return str(html)
Example #4
  def methodNext(self, previous_request, previous_response):
    """Retrieves the next page of results.

Args:
  previous_request: The request for the previous page. (required)
  previous_response: The response from the request for the previous page. (required)

Returns:
  A request object that you can call 'execute()' on to request the next
  page. Returns None if there are no more items in the collection.
    """
    # Retrieve nextPageToken from previous_response
    # Use as pageToken in previous_request to create new request.

    if 'nextPageToken' not in previous_response:
      return None

    request = copy.copy(previous_request)

    pageToken = previous_response['nextPageToken']
    parsed = list(urlparse(request.uri))
    q = parse_qsl(parsed[4])

    # Find and remove old 'pageToken' value from URI
    newq = [(key, value) for (key, value) in q if key != 'pageToken']
    newq.append(('pageToken', pageToken))
    parsed[4] = urlencode(newq)
    uri = urlunparse(parsed)

    request.uri = uri

    logger.info('URL being requested: %s %s' % (methodName,uri))

    return request
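Below is a minimal standalone illustration of the pageToken swap this method performs on the request URI; the request object and URL here are stand-ins, not the real googleapiclient types.

import copy
from types import SimpleNamespace
from urllib.parse import urlparse, urlunparse, parse_qsl, urlencode

previous_request = SimpleNamespace(
    uri='https://api.example.com/items?maxResults=10&pageToken=AAA')
previous_response = {'nextPageToken': 'BBB'}

# Drop the old pageToken and append the one from the previous response.
parsed = list(urlparse(previous_request.uri))
query = [(k, v) for k, v in parse_qsl(parsed[4]) if k != 'pageToken']
query.append(('pageToken', previous_response['nextPageToken']))
parsed[4] = urlencode(query)

next_request = copy.copy(previous_request)
next_request.uri = urlunparse(parsed)
# next_request.uri -> 'https://api.example.com/items?maxResults=10&pageToken=BBB'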
Example #5
def extend_qs(base_url, **kwargs):
    """
    Extend querystring of the URL with kwargs, taking care of python types.

    - True is converted to "1"
    - When a value is equal to False or None, the corresponding key is removed
      from the querystring entirely. Please note that empty strings and numeric
      zeroes are not equal to False here.
    - Unicode is converted to utf-8 string
    - Everything else is converted to string using str(obj)

    For instance:

    >>> extend_qs('/foo/?a=b', c='d', e=True, f=False)
    '/foo/?a=b&c=d&e=1'
    """
    parsed = parse.urlparse(base_url)
    query = dict(parse.parse_qsl(parsed.query))
    for key, value in kwargs.items():
        value = convert_to_string(value)
        if value is None:
            query.pop(key, None)
        else:
            query[key] = value
    query_str = parse.urlencode(query)
    parsed_as_list = list(parsed)
    parsed_as_list[4] = query_str
    return parse.urlunparse(parsed_as_list)
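The snippet relies on a convert_to_string helper that is not shown. A plausible minimal version matching the conversion rules listed in the docstring (an assumption for illustration, not the project's actual implementation):

def convert_to_string(value):
    # False and None mean "remove the key"; empty strings and 0 do not.
    if value is None or value is False:
        return None
    # True becomes "1" (this check must come after the False/None check).
    if value is True:
        return "1"
    # Everything else is stringified.
    return str(value)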
Example #6
    def post(self, request, *args, **kwargs):
        #pylint:disable=unused-argument
        uploaded_file = request.data.get('file')
        if not uploaded_file:
            return Response({'details': "no location or file specified."},
                            status=status.HTTP_400_BAD_REQUEST)

        # tentatively extract file extension.
        parts = os.path.splitext(
            force_text(uploaded_file.name.replace('\\', '/')))
        ext = parts[-1].lower() if len(parts) > 1 else ""
        key_name = "%s%s" % (hashlib.sha256(
            uploaded_file.read()).hexdigest(), ext)
        default_storage = get_picture_storage(request)

        location = default_storage.url(
            default_storage.save(key_name, uploaded_file))
        # We are removing the query parameters, as they contain
        # signature information, not the relevant URL location.
        parts = urlparse(location)
        location = urlunparse(
            (parts.scheme, parts.netloc, parts.path, "", "", ""))
        location = self.request.build_absolute_uri(location)

        self.organization.picture = location
        self.organization.save()
        return Response({'location': location}, status=status.HTTP_201_CREATED)
Example #7
File: admin_urls.py | Project: yfcheung/myrg
def add_preserved_filters(context, url, popup=False, to_field=None):
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = dict()

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    if popup:
        from myrobogals.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from myrobogals.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field

    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
Example #8
    def purge(self, url):
        url_parsed = urlparse(url)
        host = url_parsed.hostname

        # Append port to host if it is set in the original URL
        if url_parsed.port:
            host += (':' + str(url_parsed.port))

        request = PurgeRequest(url=urlunparse([
            self.cache_scheme, self.cache_netloc, url_parsed.path,
            url_parsed.params, url_parsed.query, url_parsed.fragment
        ]),
                               headers={
                                   'Host':
                                   host,
                                   'User-Agent':
                                   'Wagtail-frontendcache/' + __version__
                               })

        try:
            urlopen(request)
        except HTTPError as e:
            logger.error(
                "Couldn't purge '%s' from HTTP cache. HTTPError: %d %s", url,
                e.code, e.reason)
        except URLError as e:
            logger.error("Couldn't purge '%s' from HTTP cache. URLError: %s",
                         url, e.reason)
Example #9
def build_html_iframe(response, url_params=None, iframe_attrs=None):
    html = response.get('html', '')

    if url_params is None:
        url_params = {}

    if iframe_attrs is None:
        iframe_attrs = {}

    if html:
        # What follows is a pretty nasty looking "hack":
        # oEmbed has not implemented some parameters,
        # so for these we need to add them manually to the iframe.
        html = BeautifulSoup(html).iframe

        data_url = response.get('player_url', html['src'])
        player_url = urlparse(data_url)

        queries = parse_qs(player_url.query)
        url_params.update(queries)

        url_parts = list(player_url)
        url_parts[4] = urlencode(url_params, True)

        html['src'] = urlunparse(url_parts)

        for key, value in iframe_attrs.iteritems():
            if value:
                html[key] = value
    return unicode(html)
Example #10
def absolutify(url, site=None):
    """
    Joins a base ``Site`` URL with a URL path.

    If no site provided it gets the current site from Site.

    """
    if url.startswith('http'):
        return url

    if not site:
        site = Site.objects.get_current()

    parts = urlsplit(url)

    scheme = 'https'
    netloc = site.domain
    path = parts.path
    query = parts.query
    fragment = parts.fragment

    if path == '':
        path = '/'

    return urlunparse([scheme, netloc, path, None, query, fragment])
Example #11
def url_replace_param(url, name, value):
    """
    Replace a GET parameter in an URL
    """
    url_components = urlparse(force_str(url))

    params = parse_qs(url_components.query)

    if value is None:
        del params[name]
    else:
        params[name] = value

    return mark_safe(
        urlunparse(
            [
                url_components.scheme,
                url_components.netloc,
                url_components.path,
                url_components.params,
                urlencode(params, doseq=True),
                url_components.fragment,
            ]
        )
    )
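For instance, url_replace_param('/items/?page=2&q=x', 'page', 5) yields '/items/?page=5&q=x', while passing value=None drops the parameter entirely (and raises KeyError if it was not present); the URL here is illustrative.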
Example #12
File: backends.py | Project: DimiC/wagtail
    def purge(self, url):
        url_parsed = urlparse(url)
        host = url_parsed.hostname

        # Append port to host if it is set in the original URL
        if url_parsed.port:
            host += (':' + str(url_parsed.port))

        request = PurgeRequest(
            url=urlunparse([
                self.cache_scheme,
                self.cache_netloc,
                url_parsed.path,
                url_parsed.params,
                url_parsed.query,
                url_parsed.fragment
            ]),
            headers={
                'Host': host,
                'User-Agent': 'Wagtail-frontendcache/' + __version__
            }
        )

        try:
            urlopen(request)
        except HTTPError as e:
            logger.error("Couldn't purge '%s' from HTTP cache. HTTPError: %d %s", url, e.code, e.reason)
        except URLError as e:
            logger.error("Couldn't purge '%s' from HTTP cache. URLError: %s", url, e.reason)
Example #13
def add_preserved_filters(context, url, popup=False, to_field=None):
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = dict()

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field

    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
Example #14
def add_affiliate_code(url, aid_code):
    parsed = urlparse(str(url))
    query = dict(parse_qsl(parsed.query))
    query.update({app_settings.PARAM_NAME: str(aid_code)})
    url_parts = list(parsed)
    url_parts[4] = urlencode(query)
    return urlunparse(url_parts)
Example #15
def extend_qs(base_url, **kwargs):
    """
    Extend querystring of the URL with kwargs, taking care of python types.

    - True is converted to "1"
    - When a value is equal to False or None, the corresponding key is removed
      from the querystring entirely. Please note that empty strings and numeric
      zeroes are not equal to False here.
    - Unicode is converted to utf-8 string
    - Everything else is converted to string using str(obj)

    For instance:

    >>> extend_qs('/foo/?a=b', c='d', e=True, f=False)
    '/foo/?a=b&c=d&e=1'
    """
    parsed = parse.urlparse(base_url)
    query = dict(parse.parse_qsl(parsed.query))
    for key, value in kwargs.items():
        value = convert_to_string(value)
        if value is None:
            query.pop(key, None)
        else:
            query[key] = value
    query_str = parse.urlencode(query)
    parsed_as_list = list(parsed)
    parsed_as_list[4] = query_str
    return parse.urlunparse(parsed_as_list)
Example #16
def build_url_with_next(redirect_url, next_url):
    resolved_url = resolve_url(redirect_url)
    login_url_parts = list(urlparse(resolved_url))
    querystring = QueryDict(login_url_parts[4], mutable=True)
    querystring[settings.SSO_PROXY_REDIRECT_FIELD_NAME] = next_url
    login_url_parts[4] = querystring.urlencode(safe='/')
    return urlunparse(login_url_parts)
Example #17
def remove_affiliate_code(url):
    parsed = urlparse(str(url))
    query = dict(parse_qsl(parsed.query))
    query.pop(app_settings.PARAM_NAME, None)
    url_parts = list(parsed)
    url_parts[4] = urlencode(query)
    return urlunparse(url_parts)
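Examples #14 and #17 assume an app_settings module whose PARAM_NAME names the affiliate query parameter. A hypothetical round trip, assuming PARAM_NAME is 'aid' and an illustrative shop URL:

# add_affiliate_code('https://example.com/shop?q=1', 'A42')
#   -> 'https://example.com/shop?q=1&aid=A42'
# remove_affiliate_code('https://example.com/shop?q=1&aid=A42')
#   -> 'https://example.com/shop?q=1'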
Example #18
 def clean_url(self):
     data = self.cleaned_data['url']
     scheme, netloc, path, params, query, fragment = urlparse(data)
     if '%' not in path:
         path = quote(path)
     cleaned = urlunparse((scheme, netloc, path, '', '', ''))
     validate_mdn_url(cleaned)
     return cleaned
Example #19
 def clean_url(self):
     data = self.cleaned_data['url']
     scheme, netloc, path, params, query, fragment = urlparse(data)
     if "%" not in path:
         path = quote(path)
     cleaned = urlunparse((scheme, netloc, path, '', '', ''))
     validate_mdn_url(cleaned)
     return cleaned
Example #20
 def __init__(self, *args, **kwargs):
     instance = kwargs.get('instance')
     if instance:
         videoid = instance.glossary.get('videoid')
         if videoid:
             parts = ParseResult('https', 'youtu.be', videoid, '', '', '')
             initial = {'url': urlunparse(parts)}
             kwargs.update(initial=initial)
     super(BootstrapYoutubeForm, self).__init__(*args, **kwargs)
Example #21
 def __init__(self, *args, **kwargs):
     instance = kwargs.get('instance')
     if instance:
         videoid = instance.glossary.get('videoid')
         if videoid:
             parts = ParseResult('https', 'youtu.be', videoid, '', '', '')
             initial = {'url': urlunparse(parts)}
             kwargs.update(initial=initial)
     super(BootstrapYoutubeForm, self).__init__(*args, **kwargs)
Example #22
 def login(self, request, extra_context=None):
     """Redirect to the project-level login page."""
     resolved_url = resolve_url(settings.LOGIN_URL)
     login_url_parts = list(urlparse(resolved_url))
     if REDIRECT_FIELD_NAME in request.GET:
         querystring = QueryDict(login_url_parts[4], mutable=True)
         querystring[REDIRECT_FIELD_NAME] = request.GET[REDIRECT_FIELD_NAME]
         login_url_parts[4] = querystring.urlencode(safe='/')
     return HttpResponsePermanentRedirect(urlunparse(login_url_parts))
Example #23
    def convert_to_relationship_object(self,
                                       name,
                                       raw_id,
                                       field_data,
                                       resource_uri,
                                       include_links=True):
        """Convert from IDs to a relationship object.

        Partially implements the full spec at:
        http://jsonapi.org/format/#document-resource-object-relationships

        Expecting raw_id to be one of:
        - None (an empty to-one link)
        - A single ID (a to-one link)
        - An empty array (an empty to-many link)
        - An array of one or more IDs (a to-many link)
        The format of raw_id should agree with field_data['link']

        Return is a relationship object, such as this (include_links=True):
        {
            "data": {
                "type": "features",
                "id": "1",
            },
            "links": {
                "self": "/api/v2/features/3/relationships/parent",
                "related": "/api/v2/features/3/parent",
            },
        }
        """
        relationship = self.dict_class()
        if include_links:
            # TODO: Use reverse instead of concat to construct links
            attr_name = field_data.get('name', name)
            endpoint = field_data.get('singular', attr_name)
            scheme, netloc, path, params, query, fragment = urlparse(
                resource_uri)
            base_uri = urlunparse((scheme, netloc, path, '', '', ''))
            relationship['links'] = self.dict_class((
                ('self', base_uri + '/relationships/' + endpoint),
                ('related', base_uri + '/' + endpoint),
            ))

        link = field_data['link']
        resource = field_data.get('resource', name)
        if link in ('from_many', 'to_many'):
            data = [
                self.dict_class((('type', resource), ('id', force_text(pk))))
                for pk in raw_id
            ]
        elif raw_id is None:
            data = None
        else:
            data = self.dict_class(
                (('type', resource), ('id', force_text(raw_id))))
        relationship['data'] = data
        return relationship
Example #24
 def login(self, request, extra_context=None):
     """Redirect to the project-level login page."""
     resolved_url = resolve_url(settings.LOGIN_URL)
     login_url_parts = list(urlparse(resolved_url))
     if REDIRECT_FIELD_NAME in request.GET:
         querystring = QueryDict(login_url_parts[4], mutable=True)
         querystring[REDIRECT_FIELD_NAME] = request.GET[REDIRECT_FIELD_NAME]
         login_url_parts[4] = querystring.urlencode(safe='/')
     return HttpResponsePermanentRedirect(urlunparse(login_url_parts))
Example #25
    def put(self, request, *args, **kwargs):
        """
        Updates meta tags on assets.

        **Examples**

        .. code-block:: http

            PUT /api/assets/ HTTP/1.1

        .. code-block:: json

            {
                items: [
                    {location: "/media/item/url1.jpg"},
                    {location: "/media/item/url2.jpg"},
                    ....
                ],
                tags: ['photo', 'homepage']
            }

        When the API call returns, all asset files listed in items will be
        tagged with 'photo' and 'homepage'. Those tags can then be used later
        in searches.
        """
        #pylint: disable=unused-argument
        serializer = MediaItemListSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        assets, total_count = self.list_media(
            get_default_storage(self.request, self.account),
            self.build_filter_list(serializer.validated_data))
        if not assets:
            return Response({}, status=status.HTTP_404_NOT_FOUND)

        tags = [tag for tag in serializer.validated_data.get('tags') if tag]
        for item in assets:
            parts = urlparse(item['location'])
            location = urlunparse(
                (parts.scheme, parts.netloc, parts.path, None, None, None))
            media_tags = MediaTag.objects.filter(location=location)
            for tag in tags:
                MediaTag.objects.get_or_create(location=location, tag=tag)
            # Remove tags which are no longer set for the location.
            media_tags.exclude(tag__in=tags).delete()

            # Update tags returned by the API.
            item['tags'] = ",".join(
                list(
                    MediaTag.objects.filter(location=location).values_list(
                        'tag', flat=True)))

        serializer = self.get_serializer(sorted(assets,
                                                key=lambda x: x['updated_at']),
                                         many=True)
        return self.get_paginated_response(serializer.data)
Example #26
File: views.py | Project: rutube/alco
 def get(self, request, *args, **kwargs):
     if not request.GET.get('start_ts'):
         url = parse.urlparse(request.path)
         query_string = dict(parse.parse_qsl(request.META['QUERY_STRING']))
         start_ts = datetime.now().replace(microsecond=0) - timedelta(minutes=5)
         query_string['start_ts'] = start_ts.isoformat(sep=' ')
         query = parse.urlencode(query_string).replace('+', '%20')
         url = url._replace(query=query)
         return HttpResponseRedirect(parse.urlunparse(url))
     return super(GrepView, self).get(request, *args, **kwargs)
Example #27
def strip_path(url):
    """Strips the path, params, query and fragment from the url, keeping only scheme and netloc."""
    purl = urlparse(url)
    return urlunparse(
        ParseResult(
            purl.scheme,
            purl.netloc,
            '', '', '', ''
        )
    )
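For instance, strip_path('https://example.com/a/b?x=1#top') returns 'https://example.com', keeping only the scheme and netloc.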
Example #28
def sanitize_github_url(requirement, url):
    if url.netloc == 'github.com':
        split_path = list(filter(None, url.path.split('/')))
        if len(split_path) > 4:  # github.com/<user>/<repo>/blob/<branch>
            if split_path[2] == 'blob':
                split_path[2] = 'raw'
            path = '/' + '/'.join(split_path)
            requirement = urlunparse((url.scheme, url.netloc, path,
                                      url.params, url.query, url.fragment))
    return requirement
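A quick standalone check of the blob-to-raw rewrite; note that the function expects the requirement string plus the already-parsed URL, and that urlunparse must be imported for it to run. The repository URL is illustrative.

from urllib.parse import urlparse, urlunparse

req = 'https://github.com/user/repo/blob/main/requirements.txt'
print(sanitize_github_url(req, urlparse(req)))
# -> 'https://github.com/user/repo/raw/main/requirements.txt'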
Example #29
 def url_to_template(self, view_name, request, template_name):
     prefix, resolver = get_resolver(None).namespace_dict['v1']
     info = resolver.reverse_dict[view_name]
     path_template = info[0][0][0]
     id_field = info[0][0][1][0]
     path = prefix + path_template % {id_field: '{%s}' % template_name}
     parsed_url = urlparse(request.build_absolute_uri())
     return urlunparse(
         [parsed_url.scheme, parsed_url.netloc, path, '', '', '']
     )
Example #30
def sanitize_github_url(requirement, url):
    if url.netloc == 'github.com':
        split_path = list(filter(None, url.path.split('/')))
        if len(split_path) > 4:  # github.com/<user>/<repo>/blob/<branch>
            if split_path[2] == 'blob':
                split_path[2] = 'raw'
            path = '/' + '/'.join(split_path)
            requirement = urlunparse((url.scheme, url.netloc, path,
                                      url.params, url.query, url.fragment))
    return requirement
Example #31
 def get_url_path(self):
     if hasattr(self.get_url, "dont_recurse"):
         raise NotImplementedError
     try:
         url = self.get_url()
     except NotImplementedError:
         raise
     # bits = urlparse.urlparse(url)
     # return urlparse.urlunparse(("", "") + bits[2:])
     bits = urlparse(url)
     return urlunparse(("", "") + bits[2:])
Example #32
    def redirect_to_sso(self, next_url):
        """
        Redirects the user to the sso signup page, passing the 'next' page
        """
        resolved_url = resolve_url(self.sso_redirect_url)
        login_url_parts = list(urlparse(resolved_url))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[settings.SSO_REDIRECT_FIELD_NAME] = next_url
        login_url_parts[4] = querystring.urlencode(safe='/')

        return HttpResponseRedirect(urlunparse(login_url_parts))
Example #33
File: views.py | Project: yfix/alco
 def get(self, request, *args, **kwargs):
     if not request.GET.get('start_ts'):
         url = parse.urlparse(request.path)
         query_string = dict(parse.parse_qsl(request.META['QUERY_STRING']))
         start_ts = datetime.now().replace(microsecond=0) - timedelta(
             minutes=5)
         query_string['start_ts'] = start_ts.isoformat(sep=' ')
         query = parse.urlencode(query_string).replace('+', '%20')
         url = url._replace(query=query)
         return HttpResponseRedirect(parse.urlunparse(url))
     return super(GrepView, self).get(request, *args, **kwargs)
Example #34
 def render(self, context, instance, placeholder):
     context = self.super(BootstrapYoutubePlugin, self).render(context, instance, placeholder)
     query_params = ['autoplay', 'controls', 'loop', 'rel']
     videoid = instance.glossary.get('videoid')
     if videoid:
         query = ['{}=1'.format(key) for key in query_params if instance.glossary.get(key)]
         parts = ParseResult('https', 'www.youtube.com', '/embed/' + videoid, '', '&'.join(query), '')
         context.update({
             'youtube_url': urlunparse(parts),
             'allowfullscreen': 'allowfullscreen' if instance.glossary.get('allow_fullscreen') else '',
         })
     return context
Example #35
    def put(self, request, *args, **kwargs):
        """
        Updates meta tags on assets.

        **Examples**

        .. code-block:: http

            PUT /api/assets/ HTTP/1.1

        .. code-block:: json

            {
                items: [
                    {location: "/media/item/url1.jpg"},
                    {location: "/media/item/url2.jpg"},
                    ....
                ],
                tags: ['photo', 'homepage']
            }

        When the API call returns, all asset files listed in items will be
        tagged with 'photo' and 'homepage'. Those tags can then be used later
        in searches.
        """
        #pylint: disable=unused-argument
        serializer = MediaItemListSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)

        assets, total_count = self.list_media(
            get_default_storage(self.request, self.account),
            self.build_filter_list(serializer.validated_data))
        if not assets:
            return Response({}, status=status.HTTP_404_NOT_FOUND)

        tags = [tag for tag in serializer.validated_data.get('tags') if tag]
        for item in assets:
            parts = urlparse(item['location'])
            location = urlunparse((parts.scheme, parts.netloc, parts.path,
                None, None, None))
            media_tags = MediaTag.objects.filter(location=location)
            for tag in tags:
                MediaTag.objects.get_or_create(location=location, tag=tag)
            # Remove tags which are no longer set for the location.
            media_tags.exclude(tag__in=tags).delete()

            # Update tags returned by the API.
            item['tags'] = ",".join(list(MediaTag.objects.filter(
                location=location).values_list('tag', flat=True)))

        serializer = self.get_serializer(
            sorted(assets, key=lambda x: x['updated_at']), many=True)
        return self.get_paginated_response(serializer.data)
Example #36
    def redirect_to_login(self, next, login_url=None):
        """
        Redirects the user to the login page, passing the given 'next' page
        """
        resolved_url = resolve_url(login_url or settings.LOGIN_URL)

        login_url_parts = list(urlparse(resolved_url))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[REDIRECT_FIELD_NAME] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

        return HttpResponseRedirect(urlunparse(login_url_parts))
Example #37
    def url_to_template(self, view_name, request, template_name):
        resolver = urlresolvers.get_resolver(None)
        info = resolver.reverse_dict[view_name]

        path_template = info[0][0][0]
        # FIXME: what happens when URL has more than one dynamic values?
        # e.g. nested relations: manufacturer/%(id)s/cars/%(card_id)s
        path = path_template % {info[0][0][1][0]: "{%s}" % template_name}

        parsed_url = urlparse(request.build_absolute_uri())

        return urlunparse([parsed_url.scheme, parsed_url.netloc, path, "", "", ""])
Example #38
 def render(self, context, instance, placeholder):
     query_params = ['autoplay', 'controls', 'loop', 'rel']
     videoid = instance.glossary.get('videoid')
     if videoid:
         query = ['{}=1'.format(key) for key in query_params if instance.glossary.get(key)]
         parts = ParseResult('https', 'www.youtube.com', '/embed/' + videoid, '', '&'.join(query), '')
         context.update({
             'instance': instance,
             'youtube_url': urlunparse(parts),
             'allowfullscreen': 'allowfullscreen' if instance.glossary.get('allow_fullscreen') else '',
         })
     return context
Example #39
    def redirect_to_login(self, next, login_url=None):
        """
        Redirects the user to the login page, passing the given 'next' page
        """
        resolved_url = resolve_url(login_url or settings.LOGIN_URL)

        login_url_parts = list(urlparse(resolved_url))
        querystring = QueryDict(login_url_parts[4], mutable=True)
        querystring[REDIRECT_FIELD_NAME] = next
        login_url_parts[4] = querystring.urlencode(safe='/')

        return HttpResponseRedirect(urlunparse(login_url_parts))
Example #40
def _quote_url(url):
    """Ensures non-ascii URL paths are quoted"""
    parsed = urlparse(url)
    try:
        parsed.path.encode('ascii')
    except UnicodeError:
        path = quote(parsed.path.encode('utf-8'))
    else:
        path = parsed.path
    quoted = (parsed.scheme, parsed.netloc, path, parsed.params, parsed.query,
              parsed.fragment)
    return urlunparse(quoted)
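For example, _quote_url('https://example.com/café') would return 'https://example.com/caf%C3%A9', while a URL whose path is already ASCII is returned unchanged.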
Example #41
    def convert_to_relationship_object(
            self, name, raw_id, field_data, resource_uri, include_links=True):
        """Convert from IDs to a relationship object.

        Partially implements the full spec at:
        http://jsonapi.org/format/#document-resource-object-relationships

        Expecting raw_id to be one of:
        - None (an empty to-one link)
        - A single ID (a to-one link)
        - An empty array (an empty to-many link)
        - An array of one or more IDs (a to-many link)
        The format of raw_id should agree with field_data['link']

        Return is a relationship object, such as this (include_links=True):
        {
            "data": {
                "type": "features",
                "id": "1",
            },
            "links": {
                "self": "/api/v2/features/3/relationships/parent",
                "related": "/api/v2/features/3/parent",
            },
        }
        """
        relationship = self.dict_class()
        if include_links:
            # TODO: Use reverse instead of concat to construct links
            attr_name = field_data.get('name', name)
            endpoint = field_data.get('singular', attr_name)
            scheme, netloc, path, params, query, fragment = urlparse(
                resource_uri)
            base_uri = urlunparse((scheme, netloc, path, '', '', ''))
            relationship['links'] = self.dict_class((
                ('self', base_uri + '/relationships/' + endpoint),
                ('related', base_uri + '/' + endpoint),
            ))

        link = field_data['link']
        resource = field_data.get('resource', name)
        if link in ('from_many', 'to_many'):
            data = [
                self.dict_class((('type', resource), ('id', force_text(pk))))
                for pk in raw_id]
        elif raw_id is None:
            data = None
        else:
            data = self.dict_class(
                (('type', resource), ('id', force_text(raw_id))))
        relationship['data'] = data
        return relationship
Example #42
    def url_to_template(self, view_name, request, template_name):
        resolver = urlresolvers.get_resolver(None)
        info = resolver.reverse_dict[view_name]

        path_template = info[0][0][0]
        # FIXME: what happens when URL has more than one dynamic values?
        # e.g. nested relations: manufacturer/%(id)s/cars/%(card_id)s
        path = path_template % {info[0][0][1][0]: '{%s}' % template_name}

        parsed_url = urlparse(request.build_absolute_uri())

        return urlunparse(
            [parsed_url.scheme, parsed_url.netloc, path, '', '', ''])
Example #43
File: views.py | Project: rutube/alco
    def get_redirect_url(self, *args, **kwargs):
        obj = self.get_object()
        url = parse.urlparse(obj.url)
        query_params = copy(self.request.GET)
        lookup = self.kwargs.get('default_value')
        if lookup and obj.default_field:
            query_params[obj.default_field.name] = lookup

        query_params.update(**dict(parse.parse_qsl(url.query)))

        url = reverse('grep_view', kwargs={'name': obj.index.name})
        url = parse.urlparse(url)
        parts = list(url)
        parts[4] = parse.urlencode(query_params)
        return parse.urlunparse(parts)
Example #44
File: views.py | Project: yfix/alco
    def get_redirect_url(self, *args, **kwargs):
        obj = self.get_object()
        url = parse.urlparse(obj.url)
        query_params = copy(self.request.GET)
        lookup = self.kwargs.get('default_value')
        if lookup and obj.default_field:
            query_params[obj.default_field.name] = lookup

        query_params.update(**dict(parse.parse_qsl(url.query)))

        url = reverse('grep_view', kwargs={'name': obj.index.name})
        url = parse.urlparse(url)
        parts = list(url)
        parts[4] = parse.urlencode(query_params)
        return parse.urlunparse(parts)
Example #45
 def validate_redirect_url(next_url):
     """
     Returns the next_url path if next_url matches allowed hosts.
     """
     if not next_url:
         return None
     parts = urlparse(next_url)
     if parts.netloc:
         domain, _ = split_domain_port(parts.netloc)
         allowed_hosts = (['*'] if django_settings.DEBUG
             else django_settings.ALLOWED_HOSTS)
         if not (domain and validate_host(domain, allowed_hosts)):
             return None
     return urlunparse(("", "", parts.path,
         parts.params, parts.query, parts.fragment))
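As a hypothetical illustration: with DEBUG off and ALLOWED_HOSTS set to ['example.com'], validate_redirect_url('https://evil.com/x') returns None, while a relative URL such as '/next/?a=1' comes back unchanged.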
Example #46
def absolutify(url):
    """Joins settings.SITE_URL with a URL path."""
    if url.startswith('http'):
        return url

    site = urlsplit(settings.SITE_URL)
    parts = urlsplit(url)
    scheme = site.scheme
    netloc = site.netloc
    path = parts.path
    query = parts.query
    fragment = parts.fragment

    if path == '':
        path = '/'

    return urlunparse([scheme, netloc, path, None, query, fragment])
Example #47
 def url(self, name):
     obj = self._get_object(name)
     url = self.driver.get_object_cdn_url(obj)
     if self.secure:
         provider_type = self.provider['type'].lower()
         if 'cloudfiles' in provider_type:
             parsed_url = urlparse(url)
             if parsed_url.scheme != 'http':
                 return url
             split_netloc = parsed_url.netloc.split('.')
             split_netloc[1] = 'ssl'
             url = urlunparse(('https', '.'.join(split_netloc),
                               parsed_url.path, parsed_url.params,
                               parsed_url.query, parsed_url.fragment))
         if 's3' in provider_type:
             url = url.replace('http://', 'https://')
     return url
Example #48
def build_redirect_url(next_url, current_url, redirect_field_name,
                       extra_args=None):
    """
    The CUR_URL page about to be visited, with the NEXT page to jump to
    afterwards appended to its querystring.
    @param {string} next_url Page URL, e.g. http://a.com/page1/
    @param {string} current_url
    """
    resolved_url = resolve_url(current_url)

    login_url_parts = list(urlparse(resolved_url))

    querystring = QueryDict(login_url_parts[4], mutable=True)
    querystring[redirect_field_name] = next_url

    if extra_args:
        querystring.update(extra_args)

    login_url_parts[4] = querystring.urlencode(safe='/')

    return urlunparse(login_url_parts)
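A hypothetical call, with the redirect field name and URLs chosen for illustration:

# build_redirect_url('http://a.com/page1/', '/login/', 'next')
#   -> '/login/?next=http%3A//a.com/page1/'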
Example #49
def url_replace_param(url, name, value):
    """
    Replace a GET parameter in an URL
    """
    url_components = urlparse(force_str(url))

    params = parse_qs(url_components.query)

    if value is None:
        del params[name]
    else:
        params[name] = value

    return mark_safe(urlunparse([
        url_components.scheme,
        url_components.netloc,
        url_components.path,
        url_components.params,
        urlencode(params, doseq=True),
        url_components.fragment,
    ]))
Example #50
 def url(self, name):
     provider_type = self.provider['type'].lower()
     obj = self._get_object(name)
     if not obj:
         return None
     try:
         # currently only Cloudfiles supports it
         url = self.driver.get_object_cdn_url(obj)
     except NotImplementedError as e:
         object_path = '%s/%s' % (self.bucket, obj.name)
         if 's3' in provider_type:
             base_url = 'http://%s' % self.driver.connection.host
             url = urljoin(base_url, object_path)
         elif 'google' in provider_type:
             url = urljoin('http://storage.googleapis.com', object_path)
         elif 'azure' in provider_type:
             base_url = ('http://%s.blob.core.windows.net' %
                         self.provider['user'])
             url = urljoin(base_url, object_path)
         else:
             raise e
     if self.secure:
         if 'cloudfiles' in provider_type:
             parsed_url = urlparse(url)
             if parsed_url.scheme != 'http':
                 return url
             split_netloc = parsed_url.netloc.split('.')
             split_netloc[1] = 'ssl'
             url = urlunparse((
                 'https',
                 '.'.join(split_netloc),
                 parsed_url.path,
                 parsed_url.params, parsed_url.query,
                 parsed_url.fragment
             ))
         if ('s3' in provider_type or
                 'google' in provider_type or
                 'azure' in provider_type):
             url = url.replace('http://', 'https://')
     return url
Example #51
def absolutify(url, for_wiki_site=False):
    """Joins settings.SITE_URL with a URL path."""
    if url.startswith('http'):
        return url

    if for_wiki_site:
        site_url = settings.WIKI_SITE_URL
    else:
        site_url = settings.SITE_URL

    site = urlsplit(site_url)
    parts = urlsplit(url)
    scheme = site.scheme
    netloc = site.netloc
    path = parts.path
    query = parts.query
    fragment = parts.fragment

    if path == '':
        path = '/'

    return urlunparse([scheme, netloc, path, None, query, fragment])
Example #52
def store_file(file_object, file_name_prefix=''):
    """Creates an instance of Django's file storage backend, saves the
    file-like object with it, and returns the storage object, file name
    and file URL.
    """
    file_ext = os.path.splitext(file_object.name)[1].lower()
    file_name = make_file_name(file_ext, file_name_prefix)
    file_storage = get_storage_class()()
    # use default storage to store file
    file_storage.save(file_name, file_object)

    file_url = file_storage.url(file_name)
    parsed_url = urlparse(file_url)
    file_url = urlunparse(
        ParseResult(
            parsed_url.scheme,
            parsed_url.netloc,
            parsed_url.path,
            '', '', ''
        )
    )

    return file_storage, file_name, file_url
Example #53
def add_preserved_filters(context, url, popup=False, to_field=None):
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = dict()

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/{0}'.format(url.partition(get_script_prefix())[2])
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '{0}:{1}'.format(match.app_name, match.url_name)
            changelist_url = 'desk:{0}_{1}_changelist'.format(
                opts.app_label, opts.model_name)
            if (current_url == changelist_url
                    and '_changelist_filters' in preserved_filters):
                preserved_filters = dict(
                    parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    if popup:
        merged_qs[IS_POPUP_VAR] = 1

    if to_field:
        merged_qs[TO_FIELD_VAR] = to_field

    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
Example #54
def purge_urls_from_cache(urls, backend_settings=None, backends=None):
    # Convert each URL into one URL per managed language (WAGTAILFRONTENDCACHE_LANGUAGES setting).
    # The managed languages are common to all the defined backends.
    # This depends on settings.USE_I18N.
    languages = getattr(settings, 'WAGTAILFRONTENDCACHE_LANGUAGES', [])
    if settings.USE_I18N and languages:
        langs_regex = "^/(%s)/" % "|".join(languages)
        new_urls = []

        # Purge the given url for each managed language
        for isocode, description in languages:
            for url in urls:
                up = urlparse(url)
                new_url = urlunparse((
                    up.scheme,
                    up.netloc,
                    re.sub(langs_regex, "/%s/" % isocode, up.path),
                    up.params,
                    up.query,
                    up.fragment
                ))

                # Skip duplicates. re.sub finds no match when i18n_patterns was not used
                # in urls.py to serve content for different languages from different URLs.
                if new_url in new_urls:
                    continue

                new_urls.append(new_url)

        urls = new_urls

    for backend_name, backend in get_backends(backend_settings, backends).items():
        for url in urls:
            logger.info("[%s] Purging URL: %s", backend_name, url)

        backend.purge_batch(urls)
Example #55
from django import template
Example #56
def get_pagination_context(
    page,
    pages_to_show=11,
    url=None,
    size=None,
    justify_content=None,
    extra=None,
    parameter_name="page",
):
    """
    Generate Bootstrap pagination context from a page object
    """
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError(
            "Pagination pages_to_show should be a positive integer, you specified {pages}".format(
                pages=pages_to_show
            )
        )
    num_pages = page.paginator.num_pages
    current_page = page.number
    half_page_num = int(floor(pages_to_show / 2))
    if half_page_num < 0:
        half_page_num = 0
    first_page = current_page - half_page_num
    if first_page <= 1:
        first_page = 1
    if first_page > 1:
        pages_back = first_page - half_page_num
        if pages_back < 1:
            pages_back = 1
    else:
        pages_back = None
    last_page = first_page + pages_to_show - 1
    if pages_back is None:
        last_page += 1
    if last_page > num_pages:
        last_page = num_pages
    if last_page < num_pages:
        pages_forward = last_page + half_page_num
        if pages_forward > num_pages:
            pages_forward = num_pages
    else:
        pages_forward = None
        if first_page > 1:
            first_page -= 1
        if pages_back is not None and pages_back > 1:
            pages_back -= 1
        else:
            pages_back = None
    pages_shown = []
    for i in range(first_page, last_page + 1):
        pages_shown.append(i)

    # parse the url
    parts = urlparse(url or "")
    params = parse_qs(parts.query)

    # append extra querystring parameters to the url.
    if extra:
        params.update(parse_qs(extra))

    # build url again.
    url = urlunparse(
        [
            parts.scheme,
            parts.netloc,
            parts.path,
            parts.params,
            urlencode(params, doseq=True),
            parts.fragment,
        ]
    )

    # Set CSS classes, see http://getbootstrap.com/components/#pagination
    pagination_css_classes = ["pagination"]
    if size == "small":
        pagination_css_classes.append("pagination-sm")
    elif size == "large":
        pagination_css_classes.append("pagination-lg")

    if justify_content == "start":
        pagination_css_classes.append("justify-content-start")
    elif justify_content == "center":
        pagination_css_classes.append("justify-content-center")
    elif justify_content == "end":
        pagination_css_classes.append("justify-content-end")

    return {
        "bootstrap_pagination_url": url,
        "num_pages": num_pages,
        "current_page": current_page,
        "first_page": first_page,
        "last_page": last_page,
        "pages_shown": pages_shown,
        "pages_back": pages_back,
        "pages_forward": pages_forward,
        "pagination_css_classes": " ".join(pagination_css_classes),
        "parameter_name": parameter_name,
    }
Example #57
def get_pagination_context(page, pages_to_show=11,
                           url=None, size=None, justify_content=None,
                           extra=None, parameter_name='page'):
    """
    Generate Bootstrap pagination context from a page object
    """
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError(
            "Pagination pages_to_show should be a positive integer, you specified {pages}".format(
                pages=pages_to_show)
        )
    num_pages = page.paginator.num_pages
    current_page = page.number
    half_page_num = int(floor(pages_to_show / 2))
    if half_page_num < 0:
        half_page_num = 0
    first_page = current_page - half_page_num
    if first_page <= 1:
        first_page = 1
    if first_page > 1:
        pages_back = first_page - half_page_num
        if pages_back < 1:
            pages_back = 1
    else:
        pages_back = None
    last_page = first_page + pages_to_show - 1
    if pages_back is None:
        last_page += 1
    if last_page > num_pages:
        last_page = num_pages
    if last_page < num_pages:
        pages_forward = last_page + half_page_num
        if pages_forward > num_pages:
            pages_forward = num_pages
    else:
        pages_forward = None
        if first_page > 1:
            first_page -= 1
        if pages_back is not None and pages_back > 1:
            pages_back -= 1
        else:
            pages_back = None
    pages_shown = []
    for i in range(first_page, last_page + 1):
        pages_shown.append(i)

    # parse the url
    parts = urlparse(url or '')
    params = parse_qs(parts.query)

    # append extra querystring parameters to the url.
    if extra:
        params.update(parse_qs(extra))

    # build url again.
    url = urlunparse([
        parts.scheme,
        parts.netloc,
        parts.path,
        parts.params,
        urlencode(params, doseq=True),
        parts.fragment
    ])

    # Set CSS classes, see http://getbootstrap.com/components/#pagination
    pagination_css_classes = ['pagination']
    if size == 'small':
        pagination_css_classes.append('pagination-sm')
    elif size == 'large':
        pagination_css_classes.append('pagination-lg')

    if justify_content == 'start':
        pagination_css_classes.append('justify-content-start')
    elif justify_content == 'center':
        pagination_css_classes.append('justify-content-center')
    elif justify_content == 'end':
        pagination_css_classes.append('justify-content-end')

    return {
        'bootstrap_pagination_url': url,
        'num_pages': num_pages,
        'current_page': current_page,
        'first_page': first_page,
        'last_page': last_page,
        'pages_shown': pages_shown,
        'pages_back': pages_back,
        'pages_forward': pages_forward,
        'pagination_css_classes': ' '.join(pagination_css_classes),
        'parameter_name': parameter_name,
    }
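A hypothetical call, using Django's Paginator to build the page object; the data and URL are illustrative only.

from django.core.paginator import Paginator

page = Paginator(list(range(100)), 10).page(3)
context = get_pagination_context(page, pages_to_show=5, url='/items/?q=shoes')
# context['current_page'] -> 3, context['num_pages'] -> 10
# context['bootstrap_pagination_url'] keeps the existing q=shoes parameter.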