Code example #1
File: admin.py  Project: kezabelle/django-varlet
    def autocomplete(self, request, *args, **kwargs):
        error_response = JsonResponse(data={'results': [], 'count': 0}, status=404)
        query = request.GET.get('q', '').strip()
        if not query:
            return error_response
        field = self.model._meta.get_field('url')
        try:
            query2 = field.to_python(query)
        except ValidationError:
            return error_response

        data = tuple(self.model.objects
                     .distinct()
                     .values_list('url', flat=True)
                     .exclude(url=query2)
                     .filter(url__icontains=query2)
                     .annotate(url_length=Length('url'))
                     .filter(url_length__gt=len(query2))
                     .order_by('-url_length')[0:25]
                     .iterator())
        names = ('prefix', 'match', 'suffix')
        sorted_data = tuple(
            {'name': obj, 'parts': dict(zip_longest(names, obj.partition(query2)))}
            for obj in data
        )
        return JsonResponse(data={'results': sorted_data[0:5], 'count': len(data)})
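The prefix/match/suffix idiom above can be reproduced in plain Python. The sketch below is an illustration only; the helper name and sample values are invented, and the Django field validation and queryset filtering are omitted:

from itertools import zip_longest

def split_parts(candidate, query):
    # str.partition always returns (before, match, after), so zip_longest
    # behaves like zip here; the padding would only matter if the two
    # sequences had different lengths.
    names = ('prefix', 'match', 'suffix')
    return {'name': candidate,
            'parts': dict(zip_longest(names, candidate.partition(query)))}

print(split_parts('/blog/2017/index.html', '2017'))
# {'name': '/blog/2017/index.html',
#  'parts': {'prefix': '/blog/', 'match': '2017', 'suffix': '/index.html'}}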
Code example #2
    def resolve_columns(self, row, fields=()):
        """
        This routine is necessary so that distances and geometries returned
        from extra selection SQL get resolved appropriately into Python
        objects.
        """
        values = []
        aliases = list(self.query.extra_select)

        # Have to set a starting row number offset that is used for
        # determining the correct starting row index -- needed for
        # doing pagination with Oracle.
        rn_offset = 0
        if self.connection.ops.oracle:
            if self.query.high_mark is not None or self.query.low_mark:
                rn_offset = 1
        index_start = rn_offset + len(aliases)

        # Converting any extra selection values (e.g., geometries and
        # distance objects added by GeoQuerySet methods).
        values = [self.query.convert_values(v,
                               self.query.extra_select_fields.get(a, None),
                               self.connection)
                  for v, a in zip(row[rn_offset:index_start], aliases)]
        if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
            # We resolve the rest of the columns if we're on Oracle or if
            # the `geo_values` attribute is defined.
            for value, field in zip_longest(row[index_start:], fields):
                values.append(self.query.convert_values(value, field, self.connection))
        else:
            values.extend(row[index_start:])
        return tuple(values)
Code example #3
File: compiler.py  Project: 10sr/hue
    def resolve_columns(self, row, fields=()):
        values = []
        index_extra_select = len(self.query.extra_select)
        for value, field in zip_longest(row[index_extra_select:], fields):
            if (field and field.get_internal_type() in ("BooleanField", "NullBooleanField") and
                    value in (0, 1)):
                value = bool(value)
            values.append(value)
        return row[:index_extra_select] + tuple(values)
Code example #4
    def resolve_columns(self, row, fields=()):
        values = []
        index_extra_select = len(self.query.extra_select)
        bool_fields = ("BooleanField", "NullBooleanField")
        for value, field in zip_longest(row[index_extra_select:], fields):
            if (field and field.get_internal_type() in bool_fields and
                    value in (0, 1)):
                value = bool(value)
            values.append(value)
        return row[:index_extra_select] + tuple(values)
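In the two resolve_columns snippets above, fields defaults to an empty tuple, so zip_longest pads the missing field metadata with None and every value passes through unchanged. A standalone illustration with invented row data:

from itertools import zip_longest

row = (1, 0, 'label')
fields = ()  # no field metadata: each value is paired with None and kept as-is
print([value for value, field in zip_longest(row, fields)])
# [1, 0, 'label']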
Code example #5
File: utils.py  Project: timorieber/wagtail
def determine_boosts_weights(boosts=()):
    if not boosts:
        boosts = get_boosts()
    boosts = list(sorted(boosts, reverse=True))
    min_boost = boosts[-1]
    if len(boosts) <= WEIGHTS_COUNT:
        return list(zip_longest(boosts, WEIGHTS, fillvalue=min(min_boost, 0)))
    max_boost = boosts[0]
    boost_step = (max_boost - min_boost) / (WEIGHTS_COUNT - 1)
    return [(max_boost - (i * boost_step), weight)
            for i, weight in enumerate(WEIGHTS)]
Code example #6
File: utils.py  Project: waffle-iron/tuiuiu.io
def determine_boosts_weights(boosts=()):
    if not boosts:
        boosts = get_boosts()
    boosts = list(sorted(boosts, reverse=True))
    min_boost = boosts[-1]
    if len(boosts) <= WEIGHTS_COUNT:
        return list(zip_longest(boosts, WEIGHTS, fillvalue=min(min_boost, 0)))
    max_boost = boosts[0]
    boost_step = (max_boost - min_boost) / (WEIGHTS_COUNT - 1)
    return [(max_boost - (i * boost_step), weight)
            for i, weight in enumerate(WEIGHTS)]
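A minimal sketch of the padding behaviour that determine_boosts_weights relies on when there are fewer boosts than weights. WEIGHTS and WEIGHTS_COUNT below are invented stand-ins, not the project's real constants:

from itertools import zip_longest

WEIGHTS = ('A', 'B', 'C', 'D')  # stand-in values
WEIGHTS_COUNT = len(WEIGHTS)

boosts = sorted([10, 2], reverse=True)  # fewer boosts than weights
min_boost = boosts[-1]
# Missing boost slots are filled with min(min_boost, 0) so the remaining
# weights still receive a (non-positive) boost value.
print(list(zip_longest(boosts, WEIGHTS, fillvalue=min(min_boost, 0))))
# [(10, 'A'), (2, 'B'), (0, 'C'), (0, 'D')]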
Code example #7
File: views.py  Project: mclumd/kyudo
    def get_requested_cases(self):
        """
        Extract the requested cases from the POST data
        """
        empties   = lambda n: [""]*n
        numcases  = self.parse_numcases()

        ## Attempt to get questions from GET example
        if self.request.GET.get('example', None):
            slug = self.request.GET.get('example').lower()
            if slug in preselected_cases:
                return list(zip_longest(
                    preselected_cases[slug]['questions'],
                    preselected_cases[slug]['answers'],
                    fillvalue=""
                ))

        ## Attempt to get questions from POST data otherwise default to empties
        questions = self.request.POST.getlist('question', empties(numcases))
        answers   = self.request.POST.getlist('answer', empties(numcases))
        return list(zip_longest(questions, answers, fillvalue=""))
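A quick illustration of the fallback used above: whichever of the two lists is shorter is padded with empty strings, so every question is paired with an answer slot. The sample data is invented:

from itertools import zip_longest

questions = ['Who?', 'What?', 'Where?']
answers = ['Alice']
print(list(zip_longest(questions, answers, fillvalue='')))
# [('Who?', 'Alice'), ('What?', ''), ('Where?', '')]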
Code example #8
File: compiler.py  Project: robesson/AppsPy
    def resolve_columns(self, row, fields=()):
        # If this query has limit/offset information, then we expect the
        # first column to be an extra "_RN" column that we need to throw
        # away.
        if self.query.high_mark is not None or self.query.low_mark:
            rn_offset = 1
        else:
            rn_offset = 0
        index_start = rn_offset + len(self.query.extra_select)
        values = [self.query.convert_values(v, None, connection=self.connection)
                  for v in row[rn_offset:index_start]]
        for value, field in zip_longest(row[index_start:], fields):
            values.append(self.query.convert_values(value, field,
                                                    connection=self.connection))
        return tuple(values)
Code example #9
    def csv_match(self, csv_file, expected_data, **csv_kwargs):
        assertion_results = []
        csv_data = csv.reader(csv_file, encoding='utf-8', **csv_kwargs)
        iteration_happened = False
        is_first = True
        test_pairs = list(zip_longest(csv_data, expected_data, fillvalue=[]))
        for csv_row, expected_row in test_pairs:
            if is_first:
                # add the BOM to the data
                expected_row = ([u'\ufeff' + expected_row[0]] +
                                expected_row[1:])
                is_first = False
            iteration_happened = True
            assertion_results.append(csv_row == expected_row)

        assertion_results.append(iteration_happened is True)

        return assertion_results
Code example #10
File: compiler.py  Project: LAMike310/ecommerce
    def resolve_columns(self, row, fields=()):
        # If this query has limit/offset information, then we expect the
        # first column to be an extra "_RN" column that we need to throw
        # away.
        if self.query.high_mark is not None or self.query.low_mark:
            rn_offset = 1
        else:
            rn_offset = 0
        index_start = rn_offset + len(self.query.extra_select)
        values = [
            self.query.convert_values(v, None, connection=self.connection)
            for v in row[rn_offset:index_start]
        ]
        for value, field in zip_longest(row[index_start:], fields):
            values.append(
                self.query.convert_values(value,
                                          field,
                                          connection=self.connection))
        return tuple(values)
Code example #11
def create_products_by_class(product_class,
                             schema,
                             placeholder_dir,
                             how_many=10,
                             create_images=True,
                             stdout=None):
    category_name = schema.get('category') or DEFAULT_CATEGORY
    category = get_or_create_category(category_name)

    for dummy in range(how_many):
        product = create_product(product_class=product_class)
        set_product_attributes(product, product_class)
        product.categories.add(category)
        if create_images:
            class_placeholders = os.path.join(placeholder_dir,
                                              schema['images_dir'])
            create_product_images(product, random.randrange(1, 5),
                                  class_placeholders)
        variant_combinations = get_variant_combinations(product)

        prices = get_price_override(schema, len(variant_combinations),
                                    product.price)
        variants_with_prices = moves.zip_longest(variant_combinations, prices)

        for i, variant_price in enumerate(variants_with_prices, start=1337):
            attr_combination, price = variant_price
            sku = '%s-%s' % (product.pk, i)
            create_variant(product,
                           attributes=attr_combination,
                           sku=sku,
                           price_override=price)

        if not variant_combinations:
            # Create min one variant for products without variant level attrs
            sku = '%s-%s' % (product.pk, fake.random_int(1000, 100000))
            create_variant(product, sku=sku)
        if stdout is not None:
            stdout.write(
                'Product: %s (%s), %s variant(s)' %
                (product, product_class.name, len(variant_combinations) or 1))
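A simplified sketch of the variant/price pairing above: if get_price_override yields fewer prices than there are combinations, the remaining variants are paired with None, which presumably means no price override for that variant. The sample data is invented:

from itertools import zip_longest

variant_combinations = [{'size': 'S'}, {'size': 'M'}, {'size': 'L'}]
prices = [10, 12]  # fewer overrides than combinations
for combo, price in zip_longest(variant_combinations, prices):
    print(combo, price)
# {'size': 'S'} 10
# {'size': 'M'} 12
# {'size': 'L'} None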
Code example #12
    def test_changeset(self):
        """Testing PerforceTool.get_changeset"""
        desc = self.tool.get_changeset(157)
        self.assertEqual(desc.changenum, 157)
        self.assertEqual(type(desc.description), six.text_type)
        self.assertEqual(md5(desc.description.encode('utf-8')).hexdigest(),
                         'b7eff0ca252347cc9b09714d07397e64')

        expected_files = [
            '//public/perforce/api/python/P4Client/P4Clientmodule.cc',
            '//public/perforce/api/python/P4Client/p4.py',
            '//public/perforce/api/python/P4Client/review.py',
            '//public/perforce/python/P4Client/P4Clientmodule.cc',
            '//public/perforce/python/P4Client/p4.py',
            '//public/perforce/python/P4Client/review.py',
        ]

        for file, expected in zip_longest(desc.files, expected_files):
            self.assertEqual(file, expected)

        self.assertEqual(md5(desc.summary.encode('utf-8')).hexdigest(),
                         '99a335676b0e5821ffb2f7469d4d7019')
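A small sketch of why zip_longest (rather than zip) suits the comparison above: a missing or extra file is paired with None and fails the assertion instead of being silently dropped. The file names are invented:

from itertools import zip_longest

actual = ['//depot/a.py', '//depot/b.py']
expected = ['//depot/a.py', '//depot/b.py', '//depot/c.py']
for got, want in zip_longest(actual, expected):
    print(got == want)
# True
# True
# False  (the extra expected file is paired with None)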
Code example #13
File: random_data.py  Project: glosoftgroup/Hardware
def create_products_by_class(product_class, schema,
                             placeholder_dir, how_many=10, create_images=True,
                             stdout=None):
    category_name = schema.get('category') or DEFAULT_CATEGORY
    category = get_or_create_category(category_name)

    for dummy in range(how_many):
        product = create_product(product_class=product_class)
        set_product_attributes(product, product_class)
        product.categories.add(category)
        if create_images:
            class_placeholders = os.path.join(
                placeholder_dir, schema['images_dir'])
            create_product_images(
                product, random.randrange(1, 5), class_placeholders)
        variant_combinations = get_variant_combinations(product)

        prices = get_price_override(
            schema, len(variant_combinations), product.price)
        variants_with_prices = moves.zip_longest(
            variant_combinations, prices)

        for i, variant_price in enumerate(variants_with_prices, start=1337):
            attr_combination, price = variant_price
            sku = '%s-%s' % (product.pk, i)
            create_variant(
                product, attributes=attr_combination, sku=sku,
                price_override=price)

        if not variant_combinations:
            # Create min one variant for products without variant level attrs
            sku = '%s-%s' % (product.pk, fake.random_int(1000, 100000))
            create_variant(product, sku=sku)
        if stdout is not None:
            stdout.write('Product: %s (%s), %s variant(s)' % (
                product, product_class.name, len(variant_combinations) or 1))
Code example #14
def get_data(album, photo=None, page=1, site_url=None, is_mobile=False):
    data = {
        'URL_PHOTOS': getattr(settings, 'WEBXIANG_PHOTOS_URL', 'data/'),
        'LAZY_LOADING': getattr(settings, 'WEBXIANG_PHOTOS_LAZY', False),
        'meta': {
            'template': 'default.html',
            'style': 'base.css',
            'title': 'Albums',
            'robots': 'noindex,nofollow',
            'custom_menu': False,
            'columns': 3,
            'ppp': 36,
            'reverse_order': False,
            'default_thumb_size': (180, 180),
            'cover': None,
        },
        'entries': [],
    }

    album_data = _open_albumfile(album)
    if not album_data:
        return None

    data['meta'].update(album_data.get('meta', {}))
    data['meta']['title_gallery'] = data['meta']['title'] or album
    data['entries'] = album_data.get('entries', [])

    # force mobile template
    if data['meta']['template'] == 'story' and is_mobile:
        data['meta']['template'] = 'floating'
        data['meta']['thumbs_skip'] = False

    baseurl = data['URL_PHOTOS']
    meta_path = data['meta'].get('path', '')

    # set a constant entry indexes
    for i, entry in enumerate(data['entries'], start=1):
        entry['index'] = i

    reverse_order = bool(data['meta']['reverse_order'])
    if reverse_order:
        data['entries'] = list(reversed(data['entries']))

    if photo and photo != 'geomap':
        mode = 'photo'
        lentries = len(data['entries'])

        photo_idx = photo.split('/')[0]
        if photo_idx.isdigit():
            photo_idx = int(photo_idx)
        else:
            photo_idx = None
            if not photo.lower().endswith('.jpg'):
                photo += '.jpg'
            for idx, ent in enumerate(data['entries']):
                if isinstance(ent['image'], six.string_types):
                    f = ent['image']
                else:
                    f = ent['image']['file']
                if photo == f:
                    if reverse_order:
                        photo_idx = lentries - idx
                    else:
                        photo_idx = idx + 1
                    break
            if photo_idx is None:
                return None

        if reverse_order:
            idx = lentries - photo_idx
            data['meta']['title'] = '#%s - %s' % \
                (photo_idx, data['meta']['title'] or album)
            prev_idx = photo_idx + 1 if photo_idx < lentries else None
            next_idx = photo_idx - 1 if photo_idx > 1 else None
        else:
            idx = photo_idx - 1
            data['meta']['title'] = '#%s - %s' % \
                (photo_idx, data['meta']['title'] or album)
            prev_idx = photo_idx - 1 if photo_idx > 1 else None
            next_idx = photo_idx + 1 if photo_idx < lentries else None

        entry = data['entry'] = data['entries'][idx]

        # determine canonical photo url
        canon_link = '%s/%s' % (photo_idx, entry['slug']) \
            if 'slug' in entry else photo_idx
        data['canonical_url'] = reverse('photo',
                                        kwargs={
                                            'album': album,
                                            'photo': canon_link
                                        })

        if prev_idx is not None:
            if reverse_order:
                slug = data['entries'][idx - 1].get('slug')
            else:
                slug = data['entries'][prev_idx - 1].get('slug')
            prev_photo = '%s/%s' % (prev_idx, slug) if slug else prev_idx
            data['prev_entry'] = reverse('photo',
                                         kwargs={
                                             'album': album,
                                             'photo': prev_photo
                                         })
        else:
            data['prev_entry'] = None

        if next_idx is not None:
            if reverse_order:
                slug = data['entries'][idx + 1].get('slug')
            else:
                slug = data['entries'][next_idx - 1].get('slug')
            next_photo = '%s/%s' % (next_idx, slug) if slug else next_idx
            data['next_entry'] = reverse('photo',
                                         kwargs={
                                             'album': album,
                                             'photo': next_photo
                                         })
        else:
            data['next_entry'] = None

        img = entry.get('image')
        if isinstance(img, six.string_types):
            f = entry['image']
            path = meta_path
            size = entry.get('size') or data['meta'].get('default_image_size')
        elif img:
            f = entry['image']['file']
            path = entry['image'].get('path', meta_path)
            size = entry['image'].get('size') or data['meta'].get(
                'default_image_size')
        else:  # video
            _parse_video_entry(entry)
            path = meta_path
            f = size = ''

        path = urllib.parse.urljoin(baseurl, path)
        entry['url'] = urllib.parse.urljoin(path, f)
        entry['size'] = size

        if reverse_order:
            page = int(
                math.floor((lentries - photo_idx) /
                           float(data['meta']['ppp'])) + 1)
        else:
            page = int(math.ceil(photo_idx / float(data['meta']['ppp'])))

        entry['link'] = reverse('album', kwargs={'album': album})
        if page > 1:
            entry['link'] += page_url({}, album, '', page)

        data['meta']['description'] = entry.get('description',
                                                data['meta']['title'])
        data['meta']['copyright'] = entry.get('copyright') or \
            data['meta'].get('copyright')

    else:
        if photo == 'geomap':
            mode = 'geomap'
        else:
            mode = 'album'

        if mode == 'geomap':
            data['meta']['ppp'] = 500

        paginator = Paginator(data['entries'], data['meta']['ppp'])
        try:
            data['entries'] = paginator.page(page)
        except (EmptyPage, InvalidPage):
            data['entries'] = paginator.page(paginator.num_pages)
            page = paginator.num_pages

        # use a limited page range
        pg_range = 6
        cindex = paginator.page_range.index(page)
        cmin, cmax = cindex - pg_range, cindex + pg_range
        if cmin < 0:
            cmin = 0
        paginator.page_range_limited = paginator.page_range[cmin:cmax]

        for i, entry in enumerate(data['entries'].object_list):

            img = entry.get('image')
            path = data['meta'].get('path', meta_path)
            path = urllib.parse.urljoin(baseurl, path)
            if isinstance(img, six.string_types):
                entry['url_full'] = urllib.parse.urljoin(path, img)
            elif img:
                entry['url_full'] = urllib.parse.urljoin(path, img['file'])

            if data['meta'].get('thumbs_skip'):
                img = entry.get('image')
                path = data['meta'].get('path', meta_path)
                item_type = 'image'
            else:
                img = entry.get('thumb', entry.get('image'))
                path = data['meta'].get('path_thumb', meta_path)
                item_type = 'thumb'

            if img:
                if isinstance(img, six.string_types):
                    f = img
                    entry['size'] = data['meta'].get('default_%s_size' %
                                                     item_type)
                else:
                    f = img['file']
                    path = img.get('path',
                                   data['meta'].get('path_thumb', meta_path))
                    entry['size'] = img.get(
                        'size',
                        data['meta'].get('default_%s_size' % item_type))

                path = urllib.parse.urljoin(baseurl, path)
                entry['url'] = urllib.parse.urljoin(path, f)

                if 'link' in entry:
                    pass
                elif 'album' in entry:
                    entry['link'] = reverse('album',
                                            kwargs={'album': entry['album']})
                else:
                    slug = entry.get('slug')
                    link = '%s/%s' % (entry['index'], slug) \
                        if slug else entry['index']
                    entry['link'] = reverse('photo',
                                            kwargs={
                                                'album': album,
                                                'photo': link
                                            })

            else:  # non-image entries
                path = urllib.parse.urljoin(baseurl, meta_path)
                _parse_video_entry(entry)

        # grouping entries into columns
        columns = int(data['meta'].get('columns', 3))
        if columns:
            data['groups'] = (
                (e for e in t if e is not None)
                for t in zip_longest(
                    *(iter(data['entries'].object_list),) * columns)
            )

        # set up geo points
        if mode == 'geomap':
            points = {}
            for entry in data['entries'].object_list:
                if 'geo' in entry:
                    p = entry['geo']
                    if p not in points:
                        points[p] = []
                    if 'exif' in entry:
                        del entry['exif']
                    points[p].append(entry)
            points = sorted([(k, v) for k, v in list(points.items())],
                            key=lambda x: x[1][0]['index'])
            wxpb_settings = getattr(settings, 'WXPB_SETTINGS', None) or {}
            wxpb_settings.update(data.get('settings') or {})
            wxpb_settings['geo_points'] = points
            data['wxpb_settings'] = json.dumps(wxpb_settings)
            del data['entries']

    if data['meta']['style'] and not data['meta']['style'].endswith('.css'):
        data['meta']['style'] += '.css'

    # handle cover's URL
    cover = data['meta']['cover']
    if cover and not cover.startswith('/'):
        cover = urllib.parse.urljoin(path, cover)
    if cover and site_url:
        cover = urllib.parse.urljoin(site_url, cover)
    data['meta']['cover'] = cover

    ctx = {
        'mode': mode,
        'album': album,
    }
    ctx.update(data)

    return ctx
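The column-grouping expression near the end of the view above is the classic "grouper" idiom: repeating a single iterator lets zip_longest slice the sequence into fixed-size rows, padding the last row with None. A standalone sketch with invented entries:

from itertools import zip_longest

entries = ['p1', 'p2', 'p3', 'p4', 'p5']
columns = 3
groups = zip_longest(*(iter(entries),) * columns)
print([[e for e in row if e is not None] for row in groups])
# [['p1', 'p2', 'p3'], ['p4', 'p5']]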
Code example #15
    def generate_chunks(self,
                        old,
                        new,
                        old_encoding_list=None,
                        new_encoding_list=None):
        """Generate chunks for the difference between two strings.

        The strings will be normalized, ensuring they're of the proper
        encoding and ensuring they have consistent newlines. They're then
        syntax-highlighted (if requested).

        Once the strings are ready, chunks are built from the strings and
        yielded to the caller. Each chunk represents information on an
        equal, inserted, deleted, or replaced set of lines.

        The number of lines of each chunk type are stored in the
        :py:attr:`counts` dictionary, which can then be accessed after
        yielding all chunks.

        Args:
            old (bytes or list of bytes):
                The old data being modified.

            new (bytes or list of bytes):
                The new data.

            old_encoding_list (list of unicode, optional):
                An optional list of encodings that ``old`` may be encoded in.
                If not provided, :py:attr:`encoding_list` is used.

            new_encoding_list (list of unicode, optional):
                An optional list of encodings that ``new`` may be encoded in.
                If not provided, :py:attr:`encoding_list` is used.

        Yields:
            dict:
            A rendered chunk containing the following keys:

            ``index`` (int)
                The 0-based index of the chunk.

            ``lines`` (list of unicode):
                The rendered list of lines.

            ``numlines`` (int):
                The number of lines in the chunk.

            ``change`` (unicode):
                The type of change (``delete``, ``equal``, ``insert`` or
                ``replace``).

            ``collapsable`` (bool):
                Whether the chunk can be collapsed.

            ``meta`` (dict):
                Metadata on the chunk.
        """
        is_lists = isinstance(old, list)
        assert is_lists == isinstance(new, list)

        if old_encoding_list is None:
            old_encoding_list = self.encoding_list

        if new_encoding_list is None:
            new_encoding_list = self.encoding_list

        if is_lists:
            if self.encoding_list:
                old = self.normalize_source_list(old, old_encoding_list)
                new = self.normalize_source_list(new, new_encoding_list)

            a = old
            b = new
        else:
            old, a = self.normalize_source_string(old, old_encoding_list)
            new, b = self.normalize_source_string(new, new_encoding_list)

        a_num_lines = len(a)
        b_num_lines = len(b)

        if is_lists:
            markup_a = a
            markup_b = b
        else:
            markup_a = None
            markup_b = None

            if self._get_enable_syntax_highlighting(old, new, a, b):
                # TODO: Try to figure out the right lexer for these files
                #       once instead of twice.
                markup_a = self._apply_pygments(
                    old or '',
                    self.normalize_path_for_display(self.orig_filename))
                markup_b = self._apply_pygments(
                    new or '',
                    self.normalize_path_for_display(self.modified_filename))

            if not markup_a:
                markup_a = self.NEWLINES_RE.split(escape(old))

            if not markup_b:
                markup_b = self.NEWLINES_RE.split(escape(new))

        siteconfig = SiteConfiguration.objects.get_current()
        ignore_space = True

        for pattern in siteconfig.get('diffviewer_include_space_patterns'):
            if fnmatch.fnmatch(self.orig_filename, pattern):
                ignore_space = False
                break

        self.differ = get_differ(a,
                                 b,
                                 ignore_space=ignore_space,
                                 compat_version=self.diff_compat)
        self.differ.add_interesting_lines_for_headers(self.orig_filename)

        context_num_lines = siteconfig.get("diffviewer_context_num_lines")
        collapse_threshold = 2 * context_num_lines + 3

        line_num = 1
        opcodes_generator = self.get_opcode_generator()

        counts = {
            'equal': 0,
            'replace': 0,
            'insert': 0,
            'delete': 0,
        }

        for tag, i1, i2, j1, j2, meta in opcodes_generator:
            old_lines = markup_a[i1:i2]
            new_lines = markup_b[j1:j2]
            num_lines = max(len(old_lines), len(new_lines))

            lines = [
                self._diff_line(tag, meta, *diff_args)
                for diff_args in zip_longest(
                    range(line_num, line_num + num_lines),
                    range(i1 + 1, i2 + 1),
                    range(j1 + 1, j2 + 1),
                    a[i1:i2], b[j1:j2], old_lines, new_lines)
            ]

            counts[tag] += num_lines

            if tag == 'equal' and num_lines > collapse_threshold:
                last_range_start = num_lines - context_num_lines

                if line_num == 1:
                    yield self._new_chunk(lines, 0, last_range_start, True)
                    yield self._new_chunk(lines, last_range_start, num_lines)
                else:
                    yield self._new_chunk(lines, 0, context_num_lines)

                    if i2 == a_num_lines and j2 == b_num_lines:
                        yield self._new_chunk(lines, context_num_lines,
                                              num_lines, True)
                    else:
                        yield self._new_chunk(lines, context_num_lines,
                                              last_range_start, True)
                        yield self._new_chunk(lines, last_range_start,
                                              num_lines)
            else:
                yield self._new_chunk(lines, 0, num_lines, False, tag, meta)

            line_num += num_lines

        self.counts = counts
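A reduced sketch of the pairing that feeds _diff_line above, for a "replace" opcode whose two sides have different lengths (the line-number ranges and markup are omitted); the sample lines are invented:

from itertools import zip_longest

old_lines = ['old line 1', 'old line 2', 'old line 3']
new_lines = ['new line 1']
for old, new in zip_longest(old_lines, new_lines):
    print(old, '->', new)
# old line 1 -> new line 1
# old line 2 -> None
# old line 3 -> None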
Code example #16
def get_data(album, photo=None, page=1, site_url=None, is_mobile=False):
    data = {
        'URL_PHOTOS': getattr(settings, 'WEBXIANG_PHOTOS_URL', 'data/'),
        'LAZY_LOADING': getattr(settings, 'WEBXIANG_PHOTOS_LAZY', False),
        'meta': {
            'template': 'default.html',
            'style': 'base.css',
            'title': 'Albums',
            'robots': 'noindex,nofollow',
            'custom_menu': False,
            'columns': 3,
            'ppp': 36,
            'reverse_order': False,
            'default_thumb_size': (180, 180),
            'cover': None,
        },
        'entries': [],
    }

    album_data = _open_albumfile(album)
    if not album_data:
        return None

    data['meta'].update(album_data.get('meta', {}))
    data['meta']['title_gallery'] = data['meta']['title'] or album
    data['entries'] = album_data.get('entries', [])

    # force mobile template
    if data['meta']['template'] == 'story' and is_mobile:
        data['meta']['template'] = 'floating'
        data['meta']['thumbs_skip'] = False

    baseurl = data['URL_PHOTOS']
    meta_path = data['meta'].get('path', '')

    # set a constant entry indexes
    for i, entry in enumerate(data['entries'], start=1):
        entry['index'] = i

    reverse_order = bool(data['meta']['reverse_order'])
    if reverse_order:
        data['entries'] = list(reversed(data['entries']))

    if photo and photo != 'geomap':
        mode = 'photo'
        lentries = len(data['entries'])

        photo_idx = photo.split('/')[0]
        if photo_idx.isdigit():
            photo_idx = int(photo_idx)
        else:
            photo_idx = None
            if not photo.lower().endswith('.jpg'):
                photo += '.jpg'
            for idx, ent in enumerate(data['entries']):
                if isinstance(ent['image'], six.string_types):
                    f = ent['image']
                else:
                    f = ent['image']['file']
                if photo == f:
                    if reverse_order:
                        photo_idx = lentries - idx
                    else:
                        photo_idx = idx + 1
                    break
            if photo_idx is None:
                return None

        if reverse_order:
            idx = lentries - photo_idx
            data['meta']['title'] = '#%s - %s' % \
                (photo_idx, data['meta']['title'] or album)
            prev_idx = photo_idx + 1 if photo_idx < lentries else None
            next_idx = photo_idx - 1 if photo_idx > 1 else None
        else:
            idx = photo_idx - 1
            data['meta']['title'] = '#%s - %s' % \
                (photo_idx, data['meta']['title'] or album)
            prev_idx = photo_idx - 1 if photo_idx > 1 else None
            next_idx = photo_idx + 1 if photo_idx < lentries else None

        entry = data['entry'] = data['entries'][idx]

        # determine canonical photo url
        canon_link = '%s/%s' % (photo_idx, entry['slug']) \
            if 'slug' in entry else photo_idx
        data['canonical_url'] = reverse('photo', kwargs={
            'album': album,
            'photo': canon_link})

        if prev_idx is not None:
            if reverse_order:
                slug = data['entries'][idx - 1].get('slug')
            else:
                slug = data['entries'][prev_idx - 1].get('slug')
            prev_photo = '%s/%s' % (prev_idx, slug) if slug else prev_idx
            data['prev_entry'] = reverse('photo', kwargs={
                'album': album,
                'photo': prev_photo})
        else:
            data['prev_entry'] = None

        if next_idx is not None:
            if reverse_order:
                slug = data['entries'][idx + 1].get('slug')
            else:
                slug = data['entries'][next_idx - 1].get('slug')
            next_photo = '%s/%s' % (next_idx, slug) if slug else next_idx
            data['next_entry'] = reverse('photo', kwargs={
                'album': album,
                'photo': next_photo})
        else:
            data['next_entry'] = None

        img = entry.get('image')
        if isinstance(img, six.string_types):
            f = entry['image']
            path = meta_path
            size = entry.get('size') or data[
                'meta'].get('default_image_size')
        elif img:
            f = entry['image']['file']
            path = entry['image'].get('path', meta_path)
            size = entry['image'].get('size') or data[
                'meta'].get('default_image_size')
        else:  # video
            _parse_video_entry(entry)
            path = meta_path
            f = size = ''

        path = urllib.parse.urljoin(baseurl, path)
        entry['url'] = urllib.parse.urljoin(path, f)
        entry['size'] = size

        if reverse_order:
            page = int(math.floor((lentries - photo_idx) /
                                  float(data['meta']['ppp'])) + 1)
        else:
            page = int(math.ceil(photo_idx / float(data['meta']['ppp'])))

        entry['link'] = reverse('album', kwargs={'album': album})
        if page > 1:
            entry['link'] += page_url({}, album, '', page)

        data['meta']['description'] = entry.get('description',
                                                data['meta']['title'])
        data['meta']['copyright'] = entry.get('copyright') or \
            data['meta'].get('copyright')

        points = {}
        if 'geo' in entry:
            p = entry['geo']
            if p not in points:
                points[p] = []
            if 'exif' in entry:
                del entry['exif']
            points[p].append(entry)
        points = sorted([(k, v) for k, v in list(points.items())],
                        key=lambda x: x[1][0]['index'])
        wxpb_settings = getattr(settings, 'WXPB_SETTINGS', None) or {}
        wxpb_settings.update(data.get('settings') or {})
        wxpb_settings['geo_points'] = points
        data['wxpb_settings'] = json.dumps(wxpb_settings)

    else:
        if photo == 'geomap':
            mode = 'geomap'
        else:
            mode = 'album'

        if mode == 'geomap':
            data['meta']['ppp'] = 500

        paginator = Paginator(data['entries'], data['meta']['ppp'])
        try:
            data['entries'] = paginator.page(page)
        except (EmptyPage, InvalidPage):
            data['entries'] = paginator.page(paginator.num_pages)
            page = paginator.num_pages

        # use a limited page range
        pg_range = 6
        cindex = paginator.page_range.index(page)
        cmin, cmax = cindex - pg_range, cindex + pg_range
        if cmin < 0:
            cmin = 0
        paginator.page_range_limited = paginator.page_range[cmin:cmax]

        for i, entry in enumerate(data['entries'].object_list):

            img = entry.get('image')
            path = data['meta'].get('path', meta_path)
            path = urllib.parse.urljoin(baseurl, path)
            if isinstance(img, six.string_types):
                entry['url_full'] = urllib.parse.urljoin(path, img)
            elif img:
                entry['url_full'] = urllib.parse.urljoin(path, img['file'])

            if data['meta'].get('thumbs_skip'):
                img = entry.get('image')
                path = data['meta'].get('path', meta_path)
                item_type = 'image'
            else:
                img = entry.get('thumb', entry.get('image'))
                path = data['meta'].get('path_thumb', meta_path)
                item_type = 'thumb'

            if img:
                if isinstance(img, six.string_types):
                    f = img
                    entry['size'] = data['meta'].get('default_%s_size' %
                                                     item_type)
                else:
                    f = img['file']
                    path = img.get('path',
                                   data['meta'].get('path_thumb', meta_path))
                    entry['size'] = img.get(
                        'size',
                        data['meta'].get('default_%s_size' % item_type))

                path = urllib.parse.urljoin(baseurl, path)
                entry['url'] = urllib.parse.urljoin(path, f)

                if 'link' in entry:
                    pass
                elif 'album' in entry:
                    entry['link'] = reverse('album', kwargs={
                        'album': entry['album']})
                else:
                    slug = entry.get('slug')
                    link = '%s/%s' % (entry['index'], slug) \
                        if slug else entry['index']
                    entry['link'] = reverse('photo', kwargs={
                        'album': album,
                        'photo': link})

            else:  # non-image entries
                path = urllib.parse.urljoin(baseurl, meta_path)
                _parse_video_entry(entry)

        # grouping entries into columns
        columns = int(data['meta'].get('columns', 3))
        if columns:
            data['groups'] = (
                (e for e in t if e is not None)
                for t in zip_longest(
                        *(iter(data['entries'].object_list),) * columns)
            )

        # set up geo points
        if mode == 'geomap':
            points = {}
            for entry in data['entries'].object_list:
                if 'geo' in entry:
                    p = entry['geo']
                    if p not in points:
                        points[p] = []
                    if 'exif' in entry:
                        del entry['exif']
                    points[p].append(entry)
            points = sorted([(k, v) for k, v in list(points.items())],
                            key=lambda x: x[1][0]['index'])
            wxpb_settings = getattr(settings, 'WXPB_SETTINGS', None) or {}
            wxpb_settings.update(data.get('settings') or {})
            wxpb_settings['geo_points'] = points
            data['wxpb_settings'] = json.dumps(wxpb_settings)
            del data['entries']

    if data['meta']['style'] and not data['meta']['style'].endswith('.css'):
        data['meta']['style'] += '.css'

    # handle cover's URL
    cover = data['meta']['cover']
    if cover and not cover.startswith('/'):
        cover = urllib.parse.urljoin(path, cover)
    if cover and site_url:
        cover = urllib.parse.urljoin(site_url, cover)
    data['meta']['cover'] = cover

    ctx = {
        'mode': mode,
        'album': album,
    }
    ctx.update(data)

    return ctx
Code example #17
File: chunk_generator.py  Project: xyygit/reviewboard
    def generate_chunks(self, old, new):
        """Generate chunks for the difference between two strings.

        The strings will be normalized, ensuring they're of the proper
        encoding and ensuring they have consistent newlines. They're then
        syntax-highlighted (if requested).

        Once the strings are ready, chunks are built from the strings and
        yielded to the caller. Each chunk represents information on an
        equal, inserted, deleted, or replaced set of lines.

        The number of lines of each chunk type are stored in the
        :py:attr:`counts` dictionary, which can then be accessed after
        yielding all chunks.
        """
        is_lists = isinstance(old, list)
        assert is_lists == isinstance(new, list)

        if is_lists:
            if self.encoding_list:
                old = self.normalize_source_list(old)
                new = self.normalize_source_list(new)

            a = old
            b = new
        else:
            old, a = self.normalize_source_string(old)
            new, b = self.normalize_source_string(new)

        a_num_lines = len(a)
        b_num_lines = len(b)

        if is_lists:
            markup_a = a
            markup_b = b
        else:
            markup_a = None
            markup_b = None

            if self._get_enable_syntax_highlighting(old, new, a, b):
                source_file = \
                    self.normalize_path_for_display(self.orig_filename)
                dest_file = \
                    self.normalize_path_for_display(self.modified_filename)

                try:
                    # TODO: Try to figure out the right lexer for these files
                    #       once instead of twice.
                    if not source_file.endswith(self.STYLED_EXT_BLACKLIST):
                        markup_a = self._apply_pygments(old or '', source_file)

                    if not dest_file.endswith(self.STYLED_EXT_BLACKLIST):
                        markup_b = self._apply_pygments(new or '', dest_file)
                except:
                    pass

            if not markup_a:
                markup_a = self.NEWLINES_RE.split(escape(old))

            if not markup_b:
                markup_b = self.NEWLINES_RE.split(escape(new))

        siteconfig = SiteConfiguration.objects.get_current()
        ignore_space = True

        for pattern in siteconfig.get('diffviewer_include_space_patterns'):
            if fnmatch.fnmatch(self.orig_filename, pattern):
                ignore_space = False
                break

        self.differ = get_differ(a,
                                 b,
                                 ignore_space=ignore_space,
                                 compat_version=self.diff_compat)
        self.differ.add_interesting_lines_for_headers(self.orig_filename)

        context_num_lines = siteconfig.get("diffviewer_context_num_lines")
        collapse_threshold = 2 * context_num_lines + 3

        line_num = 1
        opcodes_generator = self.get_opcode_generator()

        counts = {
            'equal': 0,
            'replace': 0,
            'insert': 0,
            'delete': 0,
        }

        for tag, i1, i2, j1, j2, meta in opcodes_generator:
            old_lines = markup_a[i1:i2]
            new_lines = markup_b[j1:j2]
            num_lines = max(len(old_lines), len(new_lines))

            lines = [
                self._diff_line(tag, meta, *diff_args)
                for diff_args in zip_longest(
                    range(line_num, line_num + num_lines),
                    range(i1 + 1, i2 + 1),
                    range(j1 + 1, j2 + 1),
                    a[i1:i2], b[j1:j2], old_lines, new_lines)
            ]

            counts[tag] += num_lines

            if tag == 'equal' and num_lines > collapse_threshold:
                last_range_start = num_lines - context_num_lines

                if line_num == 1:
                    yield self._new_chunk(lines, 0, last_range_start, True)
                    yield self._new_chunk(lines, last_range_start, num_lines)
                else:
                    yield self._new_chunk(lines, 0, context_num_lines)

                    if i2 == a_num_lines and j2 == b_num_lines:
                        yield self._new_chunk(lines, context_num_lines,
                                              num_lines, True)
                    else:
                        yield self._new_chunk(lines, context_num_lines,
                                              last_range_start, True)
                        yield self._new_chunk(lines, last_range_start,
                                              num_lines)
            else:
                yield self._new_chunk(lines, 0, num_lines, False, tag, meta)

            line_num += num_lines

        self.counts = counts