Code example #1
    def test_init_uids(self):
        slugify = UniqueSlugify(uids=['This-is-my-test', 'This-is-another-test'])
        self.assertEqual(slugify('This % is a test ---'), 'This-is-a-test')
        self.assertEqual(slugify('This % is my test ---'), 'This-is-my-test-1')
        self.assertTrue(isinstance(slugify.uids, set))

        slugify = UniqueSlugify(uids=set(["let-me-not", "to-the-marriage", "of-true-minds"]))
        self.assertEqual(slugify("of-true-minds"), "of-true-minds-1")
        self.assertEqual(slugify("of-true-minds"), "of-true-minds-2")
Code example #2
File: services.py Project: ps-iria/foodgram-project
def save_recipe(request, form, ingredients):
    """Получить список активных тегов для фильтрации рецептов"""
    try:
        with transaction.atomic():
            custom_slugify = UniqueSlugify()
            recipe = form.save(commit=False)
            recipe.author = request.user
            if recipe.slug is None:
                slug = custom_slugify(form.cleaned_data['title'])
                slug_count = Recipe.objects.filter(
                    slug__startswith=slug).count()
                if slug_count > 0:
                    recipe.slug = f'{slug}-{slug_count}'
                else:
                    recipe.slug = slug
            recipe.save()
            RecipeIngredient.objects.filter(recipe=recipe).delete()
            objs = []
            for title, count in ingredients.items():
                ingredient = get_object_or_404(Ingredient, title=title)
                objs.append(
                    RecipeIngredient(recipe=recipe,
                                     ingredient=ingredient,
                                     count=count))
            RecipeIngredient.objects.bulk_create(objs)
            form.save_m2m()
            return recipe
    except IntegrityError:
        # HttpResponseBadRequest is a response object, not an exception,
        # so it has to be returned rather than raised
        return HttpResponseBadRequest()
Code example #3
 def test_unique_other(self):
     slugify = UniqueSlugify()
     self.assertEqual(slugify('This % is another test ---', separator='_'),
                      'This_is_another_test')
     self.assertEqual(
         slugify('- - -This -- is another ## test ---', separator='_'),
         'This_is_another_test_1')
Code example #4
File: compile.py Project: mverleg/notex_pkgs
	def make_toc(self, soup):
		""" Add sections and create outline. """
		def skip_hidden(elem):
			return 'toc-hidden' not in elem.attrs.get('class', ())
		top = outline(soup, filter=skip_hidden)
		""" Add slugs to each section. """
		slugifier = UniqueSlugify(to_lower=True)
		for item in top.flat():
			item.slug = 'ch-' + slugifier(item.name)
			if item.header:
				# new_tag must be called on the soup object, not on the Tag class
				link = soup.new_tag('a', href='#{0:s}'.format(item.slug))
				for part in item.header.children:
					link.append(part)
				item.header.append(link)
			item.elem.attrs['id'] = (item.elem.attrs.get('id', '') + ' ' + item.slug).strip(' ')
		""" Generate the table of content. """
		def toc_level(section, show=False):
			html = ''
			if show:
				html = '<li><a href="#{0:s}">{1:s}</a></li>'.format(section.slug, section.name)
			if section.children:
				html += '<ol>' + '\n'.join(toc_level(child, show=True) for child in section.children) + '</ol>'
			return html
		html = '<nav class="table-of-contents" role="navigation">' + toc_level(top) + '</nav>'
		return self.config.parser.parse_partial(html)
Code example #5
 def on_model_change(self, form, model, is_created):
     if is_created:
         model.created_by = current_user
         unique_slug = UniqueSlugify(to_lower=True)
         model.slug = unique_slug(model.name)
     else:
         slug = Slugify(to_lower=True)
         model.slug = slug(model.name)
Code example #6
def create_user_profile(sender, instance, created, **kwargs):
    if created:
        slugify_unique = UniqueSlugify(unique_check=my_unique_check,
                                       separator='_',
                                       to_lower=True,
                                       max_length=100)
        slug = slugify_unique(instance.username)
        Profile.objects.create(user=instance, slug=slug)
Code example #7
File: models.py Project: leevg/KpiGirls-Redux
def create_facemash(sender, instance, created, **kwargs):
    if created:
        slugify_unique = UniqueSlugify(unique_check=my_unique_check,
                                       separator='-',
                                       to_lower=True,
                                       max_length=100)
        instance.slug = slugify_unique(instance.first_name + '.' +
                                       instance.last_name)
        instance.save()
Code example #8
 def make_slug(cls, title):
     """
     Redefined to create unique post slug from title.
     """
     slugify_title = UniqueSlugify(
         unique_check=Post.unique_post_slug_checker,
         to_lower=True,
         max_length=50)
     return slugify_title(title)
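
The `unique_post_slug_checker` referenced above is not shown. A minimal sketch of what such a check might look like, assuming a Django `Post` model with a `slug` field (the `(text, uids)` signature is the contract UniqueSlugify passes to `unique_check`):

    @staticmethod
    def unique_post_slug_checker(text, uids):
        # hypothetical sketch: reject candidates already handed out in this
        # run or already stored in the database
        if text in uids:
            return False
        return not Post.objects.filter(slug=text).exists()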
Code example #9
def generate_slug(title):
    """Generate unique slug from entry title."""
    slug = UniqueSlugify(to_lower=True)
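    # UniqueSlugify remembers every slug it returns, so calling slug(title)
    # again after `continue` yields "title-1", "title-2", and so on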
    while True:
        new_slug = slug(title)
        if Entry.get_or_none(Entry.slug == new_slug):
            continue
        else:
            return new_slug
Code example #10
    def generate_slug(self):
        """Create unique slug from title"""

        all_slugs = Post.query.with_entities(Post.slug).all()

        if self.title:
            custom_slugify = UniqueSlugify(to_lower=True,
                                           uids=[slug for slug, in all_slugs])
            self.slug = custom_slugify(self.title)
Code example #11
    def get(self, slug=None):
        """render the view"""
        form = DuplicateForm(self.request.form, config=self.config)
        slugify = UniqueSlugify(separator='_', max_length=50, to_lower=True)

        if self.request.method == 'POST' and form.validate():

            f = form.data

            # as a test just copy everything by just changing the existing camp
            self.barcamp.name = self._("Copy of ") + self.barcamp.name
            new_slug = slugify(self.barcamp.name)
            while self.config.dbs.barcamps.by_slug(new_slug):
                new_slug = new_slug + "_1"

            self.barcamp.slug = new_slug
            self.barcamp.workflow = "created"
            self.barcamp.registration_data = {}

            # either delete all events or just the people
            if "events" not in self.request.form:
                self.barcamp.events = {}
            else:
                for eid, event in self.barcamp.events.items():
                    event['participants'] = []
                    event['maybe'] = []
                    event['waiting_list'] = []
                    event['timetable']['sessions'] = {}
                    self.barcamp.events[eid] = event

            # delete ticket classes if switched off
            if "tickets" not in self.request.form:
                self.barcamp.ticket_classes = []
            else:
                # if events are deleted we need to delete them from the tickets, too
                if "events" not in self.request.form:
                    new_tcs = []
                    for tc in self.barcamp.ticket_classes:
                        tc['events'] = []
                        new_tcs.append(tc)
                    self.barcamp.ticket_classes = new_tcs

            # create new pad uids
            self.barcamp.planning_pad = unicode(uuid.uuid4())
            self.barcamp.documentation_pad = unicode(uuid.uuid4())

            # make it a new one
            self.barcamp['_id'] = ObjectId()

            barcamp = self.config.dbs.barcamps.put(self.barcamp)
            self.flash(self._("The barcamp has been duplicated."),
                       category="info")
            return redirect(
                self.url_for("barcamps.edit", slug=self.barcamp.slug))

        return self.render(form=form)
Code example #12
 def save(self, *args, **kwargs):
     """Override save method to populate slug field on class."""
     # TODO change slug if title changes?
     slug_attr = self.get_slug_attr()
     if not self.slug and slug_attr:
         checker = UniqueSlugify(unique_check=self.slug_uniqueness_check,
                                 to_lower=True,
                                 max_length=200)
         self.slug = checker(slug_attr, uids=self.get_initial_slug_uids())
     return super().save(*args, **kwargs)
Code example #13
    def test_is_unique_override(self):
        def my_unique_check(text, uids):
            return len(text) > 3 and text not in uids

        slugify = UniqueSlugify(unique_check=my_unique_check)

        self.assertEqual(slugify('te occidere possunt'), 'te-occidere-possunt')
        self.assertEqual(slugify('te occidere possunt'), 'te-occidere-possunt-1')
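        # 'boo' is only three characters long, so my_unique_check rejects the
        # bare slug and a counter suffix is appended on the very first call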
        self.assertEqual(slugify('boo'), 'boo-1')
        self.assertEqual(slugify('boo'), 'boo-2')
Code example #14
def slugify(values, ensure_unique=False, **kwargs):
    """
    Given a sequence of strings, returns a standardized version of the sequence.
    If ``ensure_unique`` is True, any duplicate strings will be appended with
    a unique identifier. Any kwargs will be passed to the Slugify or
    UniqueSlugify class constructor

    See: https://github.com/dimka665/awesome-slugify
    """
    # Default to all lowercase
    slug_args = {'to_lower': True}
    slug_args.update(kwargs)

    if ensure_unique:
        custom_slugify = UniqueSlugify(**slug_args)
    else:
        custom_slugify = Slugify(**slug_args)

    return tuple(custom_slugify(value) for value in values)
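
A usage sketch for the wrapper above (output shown under awesome-slugify's defaults, where the separator is '-'):

    values = ['Hello World', 'Hello World']
    slugify(values)                      # ('hello-world', 'hello-world')
    slugify(values, ensure_unique=True)  # ('hello-world', 'hello-world-1')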
Code example #15
def main(ztype, outfile, names, highlight_title):

    url = 'http://platforms.axds.co/platforms/byclass/ioosgliderdac/'
    lst = requests.get(url).json()

    datafile = '{name}.enhanced.nc'
    datafolder = '/mnt/store/data/platforms/prod/IoosGliderDac/{uuid}/download/'
    slug = UniqueSlugify(separator='_', to_lower=True)

    if "all" in names:
        names = [x['name'] for x in lst]

    datasets = []
    for n in sorted(names):

        try:
            gd = next(x for x in lst if x['slug'].lower() == n.lower())
            assert 'Enhanced NetCDF' in [y['name'] for y in gd['datafiles']]
        except StopIteration:
            print('No glider {} found!'.format(n))
            continue
        except AssertionError:
            print('No enhanced.nc file for {} found!'.format(n))
            continue

        xml = ioos_template.format(slug=slug(n),
                                   file=datafile.format(name=n),
                                   folder=datafolder.format(uuid=gd['id']),
                                   title='{} - {}'.format(highlight_title, n))
        datasets.append(etree.fromstring(xml))

    with open(outfile, 'wt') as f:
        for d in datasets:
            try:
                f.write(
                    etree.tostring(d,
                                   encoding='ISO-8859-1',
                                   pretty_print=True,
                                   xml_declaration=False).decode('iso-8859-1'))
                f.write('\n')
            except UnicodeDecodeError:
                print("ERROR WITH: {}\n\n".format(etree.tostring(d)))
Code example #16
File: utils.py Project: Jickelsen/Arche
def generate_slug(parent, text, limit=40):
    """ Suggest a name for content that will be added.
        text is a title or similar to be used.
    """
    #Stop words configurable?
    #We don't have any language settings anywhere
    #Note about kw uids: It's keys already used.
    used_names = set(parent.keys())
    request = get_current_request()
    used_names.update(get_context_view_names(parent, request))
    sluggo = UniqueSlugify(to_lower=True,
                           stop_words=['a', 'an', 'the'],
                           max_length=80,
                           uids=used_names)
    suggestion = sluggo(text)
    if not len(suggestion):
        raise ValueError("When text was made URL-friendly, nothing remained.")
    if check_unique_name(parent, request, suggestion):
        return suggestion
    raise KeyError("No unique id could be found")
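
A sketch of what the stop-word handling above would produce, assuming the installed slugify version matches stop words case-insensitively:

    sluggo = UniqueSlugify(to_lower=True, stop_words=['a', 'an', 'the'])
    sluggo('The Name of an Item')   # -> 'name-of-item'
    sluggo('The Name of an Item')   # -> 'name-of-item-1'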
Code example #17
def save_image(info, destination):
    """ Downloads the image to the URL

    @param info: dict with metadata about image
    @param destination: directory where to download
    """

    url = info['link']
    logger.info("Downloading %s", url)

    filename = url.split('/')[-1]
    suffix = filename.split('.')[-1]

    # fall back to the MIME type when the file name carries no extension
    if not suffix or '.' not in filename:
        suffix = info['type'].split('/')[-1]

    if suffix == 'jpeg':
        suffix = 'jpg'

    title = info['title'] or info['id']

    sluger = UniqueSlugify(uids=os.listdir(destination))
    slug = sluger(title)
    filename = "%s.%s" % (slug, suffix)
    filepath = os.path.join(destination, filename)

    download(info['link'], filepath)

    description = info['description']

    if description:
        txtpath = os.path.join(destination, '%s.txt' % slug)
        with open(txtpath, 'w') as f:
            f.write("Title: %s\r" % title)
            f.write("Description: %s\r" % description)

        if G['find-albums']:
            for album in find_albums(description):
                logger.info("Queuing download of album: %s", album)
                # bind the loop variable as a default argument so each queued
                # job downloads its own album instead of the last one seen
                processor.put(lambda album=album: download_album(album=album))
Code example #18
class POIM(models.Model):
    name = models.CharField(max_length=250)
    slug = models.SlugField(max_length=250,
                            null=False,
                            blank=False,
                            unique=True,
                            db_index=True)
    location = geomodels.PointField(blank=False, null=False)
    description = models.TextField()
    status = models.CharField(max_length=50,
                              choices=choices.STATUSES,
                              default=choices.PENDING)
    severity = models.IntegerField(choices=choices.SEVERITIES, default=1)
    tags = TaggableManager()
    owner = models.ForeignKey("api.User",
                              on_delete=models.SET_NULL,
                              null=True,
                              related_name='poims')
    created_date = models.DateTimeField(blank=True,
                                        null=False,
                                        auto_now_add=True)
    updated_date = models.DateTimeField(blank=True, null=True, auto_now=True)

    _slug_generator = UniqueSlugify(
        max_length=250,
        to_lower=True,
        unique_check=unique_slug_checker('api.POIM'))

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        if not self.id:
            self.slug = self._slug_generator(self.name)

        return super().save(*args, **kwargs)
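
The `unique_slug_checker` factory is not shown here. A hypothetical sketch with the same shape, resolving the model lazily from the 'app_label.Model' string so it can run before the app registry is fully loaded:

    from django.apps import apps

    def unique_slug_checker(model_path):
        def check(text, uids):
            # hypothetical: look the model up at call time, then test both
            # the in-memory uids and the database for collisions
            model = apps.get_model(model_path)
            if text in uids:
                return False
            return not model.objects.filter(slug=text).exists()
        return check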
Code example #19
 def test_unique(self):
     slugify = UniqueSlugify()
     self.assertEqual(slugify('This % is another test ---'), 'This-is-another-test')
     self.assertEqual(slugify('- - -This -- is another ## test ---'), 'This-is-another-test-1')
Code example #20
class AbbyOCR:
    """
    Class for the Abbyy OCR
    """

    base_url = 'https://cloud.ocrsdk.com/'
    logger = logging.getLogger(__name__)
    slugify = UniqueSlugify()
    _destination_folder = None
    timeout = 60 * 10

    def __init__(  # pylint: disable=too-many-arguments
            self,
            event_loop,
            destination_folder,
            input_file,
            input_folder,
            abbyy_login=None,
            abbyy_password=None):
        self.destination_folder = destination_folder
        self.input_file = input_file
        self.input_folder = input_folder
        self.event_loop = event_loop

        self.session = aiohttp.ClientSession(auth=aiohttp.helpers.BasicAuth(
            abbyy_login, abbyy_password),
                                             loop=event_loop)

        self.download_session = aiohttp.ClientSession(loop=event_loop)

    async def clean(self):
        """
        Close open sessions at the end of execution
        :return:
        """
        self.logger.info('close sessions')
        await self.session.close()
        await self.download_session.close()

    @classmethod
    def run(  # pylint: disable=too-many-arguments
            cls,
            destination_folder,
            input_file,
            input_folder,
            abbyy_login=None,
            abbyy_password=None):
        """
        Open event loop and run class entry point
        :param destination_folder: Path to the results folder
        :param input_file: path to the file with the files list
        :param input_folder: path to the folder with the files for treatment
        :param abbyy_login:  abbyy login
        :param abbyy_password: abbyy password
        :return:
        """
        cls.logger.info('AbbyOCR started')
        event_loop = asyncio.get_event_loop()
        try:
            event_loop.run_until_complete(
                cls.main(event_loop, destination_folder, input_file,
                         input_folder, abbyy_login, abbyy_password))
        finally:
            event_loop.close()
        cls.logger.info('AbbyOCR executed')

    @classmethod
    async def main(  # pylint: disable=too-many-arguments
            cls, event_loop, destination_folder, input_file, input_folder,
            abbyy_login, abbyy_password):
        """
        Class entry point. Create and run producer and consumers
        :param event_loop:
        :param destination_folder:
        :param input_file:
        :param input_folder:
        :param abbyy_login:
        :param abbyy_password:
        :return:
        """
        num_consumers = 5
        instance = cls(event_loop, destination_folder, input_file,
                       input_folder, abbyy_login, abbyy_password)
        # Create the queue with a fixed size so the producer
        # will block until the consumers pull some items out.
        cls.logger.info('Files queues created')
        files_queue = asyncio.Queue(maxsize=num_consumers * 2)
        # Scheduled the consumer tasks.
        consumers = [
            event_loop.create_task(instance.download_files(files_queue, n))
            for n in range(num_consumers)
        ]
        cls.logger.info('Created %s consumers', num_consumers)
        # Schedule the producer task.
        prod = event_loop.create_task(
            instance.upload_files(files_queue, num_consumers))
        cls.logger.info('Created producer')
        # Wait for all of the coroutines to finish.
        cls.logger.info('Start processing')
        await asyncio.wait(consumers + [prod])
        cls.logger.info('Close sessions')
        await instance.clean()

    async def upload_files(self, queue, num_workers):
        """
        Producer. Upload file to the API
        :param queue: queue of the tasks to sync consumers
        :param num_workers: number of async uploads
        :return:
        """
        self.logger.info('Files uploading started')
        # Add some numbers to the queue to simulate jobs

        routines = []
        for file in open(self.input_file):
            file = file.strip()
            routines.append(self.upload_file(file, queue))
            if len(routines) >= num_workers:
                await asyncio.wait(routines)
                self.logger.info('Upload batch %s', routines)
                routines = []
        if routines:
            self.logger.info('Upload batch %s', routines)
            await asyncio.wait(routines)

        # Add None entries in the queue
        # to signal the consumers to exit
        self.logger.info('Added stop signals to the queue')
        for _ in range(num_workers):
            await queue.put(None)
        self.logger.info('Waiting for the empty queue')
        await queue.join()
        self.logger.info('Ending')

    async def download_files(self, queue, worker_number):
        """
        Consumer. Async worker to download files
        from the api
        :param queue: tasks queue to synchronize workers
        :param worker_number: number of worker for debug
        :return:
        """
        self.logger.info('consumer %s: starting', worker_number)
        while True:
            self.logger.info('consumer %s: waiting for task', worker_number)
            task = await queue.get()
            self.logger.info('consumer %s: has task %s', worker_number, task)
            if task is None:
                # None is the signal to stop.
                queue.task_done()
                break
            else:
                completed_task = await self.get_completed_task(task.id)
                if completed_task:
                    completed_task.file_name = task.file_name
                    await self.download_and_save_file(completed_task)
                    await self.delete_task(completed_task.id)
                    self.logger.info('consumer %s: task %s is done',
                                     worker_number, task)
                queue.task_done()

        self.logger.info('consumer %s: ending', worker_number)

    async def upload_file(self, path, queue):
        """
        Upload file to the api
        :param path:
        :param queue:
        :return:
        """
        self.logger.info(path)
        try:
            data = await self.read_file(path)
        except OSError as os_exception:
            self.logger.warning('Can\'t read file %s (%s)', path,
                                str(os_exception))
            return
        url_params = {
            "language": ProcessingSettings.language,
            "exportFormat": ProcessingSettings.format
        }
        request_url = self.base_url + "processImage"
        task = await self._make_request(method='POST',
                                        url=request_url,
                                        data=data,
                                        params=url_params,
                                        return_first=True)
        file_name = self.get_file_name(path)
        self.logger.info('Received: %s (%s)', task, file_name)
        if task:
            task.file_name = file_name
            await queue.put(task)

    async def read_file(self, path):
        """
        Read file from disk
        :param path:
        :return:
        """
        async with aiofiles.open(os.path.join(self.input_folder, path),
                                 'rb',
                                 loop=self.event_loop) as image_file:
            data = await image_file.read()
            return data

    async def _make_request(  # pylint: disable=too-many-arguments
            self,
            method,
            url,
            data=None,
            params=None,
            return_first=False):
        with aiohttp.Timeout(self.timeout):
            async with self.session.request(method=method,
                                            url=url,
                                            data=data,
                                            params=params) as resp:
                self.logger.info('method: %s, url: %s; response code: %s',
                                 method, url, resp.status)
                res_data = await resp.read()
                if resp.status == 200:
                    tasks = self.decode_response(res_data)
                    if return_first:
                        tasks = tasks.pop()
                    return tasks
                else:
                    self.logger.warning(
                        'method: %s, url: %s; response code: %s; response: %s',
                        method, url, resp.status, res_data)

    async def get_completed_task(self, task_id):
        """
        Wait for the completed task and return it
        :param task_id: task id from the api
        :return:
        """
        request = {
            'url': self.base_url + 'getTaskStatus',
            'method': 'GET',
            'params': {
                'taskId': task_id
            },
            'return_first': True,
            'data': None
        }
        counter = 1
        while True:
            self.logger.info('Get status for task %s', task_id)
            sleep = counter * 2
            task = await self._make_request(**request)
            self.logger.info('Received task %s', task_id)
            if task is None:
                self.logger.warning('Task %s is None', task_id)
                return None
            elif task.ready():
                self.logger.info('Task %s is ready, url: %s', task.id,
                                 task.download_url)
                return task
            elif task.failed():
                self.logger.warning('Task %s is failed (%s)', task.id, task)
                return None
            self.logger.info('Task %s is not ready, sleep %s sec (retry %s)',
                             task.id, sleep, counter)
            await asyncio.sleep(sleep)
            counter += 1
            if counter > 1000000:
                break

    async def download_and_save_file(self, task):
        """
        Download file and save it on disk
        :param task:
        :return:
        """
        self.logger.info(task)
        path = os.path.join(self.destination_folder, task.file_name)
        self.logger.info('Download task %s with url %s', task.id,
                         task.download_url)
        with aiohttp.Timeout(self.timeout):
            async with aiofiles.open(path, 'wb', loop=self.event_loop) as destination_file, \
                    self.download_session.get(task.download_url) as resp:
                self.logger.info('Task %s with url %s has code %s', task.id,
                                 task.download_url, resp.status)
                self.logger.info('Write task %s to the file %s', task.id, path)
                while True:
                    response_chunk = await resp.content.read(1024)
                    if not response_chunk:
                        break
                    await destination_file.write(response_chunk)

    async def delete_task(self, task_id):
        """
        Remove task from the API
        :param task_id:
        :return:
        """
        self.logger.info('Delete task %s', task_id)
        request = {
            'url': self.base_url + 'deleteTask',
            'method': 'GET',
            'params': {
                'taskId': task_id
            },
            'return_first': True,
            'data': None
        }
        task = await self._make_request(**request)
        return task

    def decode_response(self, xml_response):  # pylint: disable=no-self-use
        """
        Decode xml response of the server. Return Task object
        """
        dom = xml.dom.minidom.parseString(xml_response)
        task_nodes = dom.getElementsByTagName("task")
        res = []
        for task_node in task_nodes:
            task = Task()
            task.id = task_node.getAttribute("id")
            task.status = task_node.getAttribute("status")
            if task.status == "Completed":
                task.download_url = task_node.getAttribute("resultUrl")
            res.append(task)
        return res

    def get_file_name(self, path):
        """
        Slugify file name and add docx suffix
        :param path:
        :return:
        """
        name = '{}.docx'.format(self.slugify(path))
        self.logger.info('Generated name %s for file %s', name, path)
        return name

    @property
    def destination_folder(self):
        """
        destination folder getter
        :return:
        """
        return self._destination_folder

    @destination_folder.setter
    def destination_folder(self, folder_path):
        """
        Destination folder setter
        :param folder_path:
        :return:
        """
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        self._destination_folder = folder_path
Code example #21
File: barcamp.py Project: sk1p/camper
    def before_serialize(self, obj):
        """make sure we have all required data for serializing"""

        ###
        ### process the embed urls
        ###

        def do_embed(x):

            url, t = x.groups()
            html = embedder(url)
            if url == html: # this means no plugin was found
                return x.string[x.start():x.end()]
            return html

        obj.description2 = a_re.sub(do_embed, obj.description)

        ###
        ### remove all sessions which have no room or timeslot anymore
        ###

        for event in obj.eventlist:
            tt = event.get('timetable', {})
            rooms = tt.get('rooms', [])
            timeslots = tt.get('timeslots', [])

            all_idxs = [] # list of all possible indexes of room/time

            for room in rooms:
                for timeslot in timeslots:
                    all_idxs.append("%s@%s" %(room['id'], timeslot['time']))

            if 'sessions' in tt:
                sessions = {}
                for idx, session in tt['sessions'].items():
                    if idx in all_idxs:
                        sessions[idx] = session
                event['timetable']['sessions'] = sessions

        ###
        ### fix all the sids and slugs in the session plan
        ###
        for event in obj.eventlist:
            sessions = event.get('timetable', {}).get('sessions', {})

            # dict with all session slugs and their id except the new ones
            all_slugs = dict([(s['slug'], s['sid']) for s in sessions.values() if s['slug'] is not None])
            
            for session_idx, session in sessions.items():

                # compute sid if missing
                if session.get("sid", None) is None:
                    session['sid'] = unicode(uuid.uuid4())

                # compute slug if missing
                slugify = UniqueSlugify(separator='_', uids = all_slugs.keys(), max_length = 50, to_lower = True)
                orig_slug = session.get("slug", None)

                # we need a new slug if a) the slug is None (new) or 
                # b) another session with this slug exists already
                # we can solve all this with .get() as the default is None anyway
                my_sid = all_slugs.get(orig_slug, None) 
                if my_sid != session['sid']: # for new ones it's None != xyz
                    new_slug = slugify(session['title'])
                    session['slug'] = new_slug
                    all_slugs[new_slug] = session['sid'] 
                event['timetable']['sessions'][session_idx] = session

        return obj
Code example #22
 def save(self, *args, **kwargs):
     if not self.slug:
         slugify_unique = UniqueSlugify(separator='-', to_lower=True)
         self.slug = slugify_unique(self.title)
     super().save(*args, **kwargs)
Code example #23
 def save(self, *args, **kwargs):
     if not self.pk:
         unique_slugify = UniqueSlugify(unique_check=check_unique_slug)
         self.slug = unique_slugify(self.title)
     return super().save(*args, **kwargs)
Code example #24
def download_album(url=None, album=None, destination=None):
    """ Downloads the album to the destination.

    Returns success status
    """

    if not (url or album):
        raise ValueError

    logger.debug("Retrieving info on album %s", url or album)

    if url:
        album = get_album_id(url)

    if album in seen:
        return
    else:
        seen.append(album)

    meta = get_album_metadata(album)

    if not meta:
        logger.error("Error retrieving album metadata for %s", album)

        return

    logger.info("Got album titled %s", meta['title'])

    destination = destination or G.base

    sluger = UniqueSlugify()  # uids=os.listdir(destination)

    if not meta['title']:
        meta['title'] = 'Unknown Artists - Untitled Album'

    album_id = sluger(meta['title'])
    album_path = os.path.join(destination, album_id)

    logger.debug("Saving album to %s", album_path)

    os.makedirs(album_path, exist_ok=True)

    if G['find-albums']:
        for album in find_albums(meta['description'] or ''):
            logger.info("Queuing download of album: %s", album)
            # bind the loop variable as a default argument so each queued job
            # captures its own album (it also shadows the outer `album`)
            processor.put(lambda album=album: download_album(album=album))

    with open(os.path.join(album_path, 'album-metadata.txt'), 'w') as f:
        # `%` binds tighter than `or`, so the fallbacks must be parenthesized
        f.write('Title: %s\r' % (meta['title'] or meta['id']))
        f.write('Album ID: %s\r' % album)
        f.write('Description: %s\r' % (meta['description'] or ''))

    endpoint = "https://api.imgur.com/3/album/%s/images" % album
    try:
        response = request(endpoint)
        res = response.json()
    except Exception:
        return False

    if res['status'] != 200 or not (res['success']):
        return False

    for info in res['data']:
        save_image(info, album_path)
Code example #25
def _unique_slug_check(slug, uids):
    """
    Checks whether there is an image with given slug
    """
    slug = utils.name_in_db(slug)
    return not Image.objects.filter(image__startswith=slug).exists()


# the object used to create unique slugs for names of images
# slugs contain the slugified str version of the object and a unique number,
# except for the first image:
#     slugified-title
#     slugified-title-1
#     slugified-title-2
#     etc...
slugify_unique = UniqueSlugify(unique_check=_unique_slug_check, to_lower=True)


class ImageQuerySet(models.QuerySet):
    """
    A custom QuerySet that implements deletion of all related files
    """
    def delete(self):
        """
        Performs deletion of all related files
        """
        for obj in self:
            obj.delete_files()
        return super().delete()

Code example #26
File: tests.py Project: sgeulette/awesome-slugify
 def test_init_uids(self):
     slugify = UniqueSlugify(uids=['This-is-my-test', 'This-is-another-test'])
     self.assertEqual(slugify('This % is a test ---'), 'This-is-a-test')
     self.assertEqual(slugify('This % is my test ---'), 'This-is-my-test-1')
Code example #27
    markdown_source = re_title_2.sub(r"\1### \2\3", markdown_source)
    markdown_source = re_title_1.sub(r"\1## \2\3", markdown_source)

    markdown_source = re_title_2_long.sub(r"\1### \2\3", markdown_source)

    def repl_title_1_long(match):
        return (match.group(1) + match.group(2) + "\n" +
                "-" * len(match.group(3)) + match.group(4))

    markdown_source = re_title_1_long.sub(repl_title_1_long, markdown_source)

    return markdown_source


re_image = re.compile(r"!\[([^\]]+)\]\(([^\)]+)\)")
slugify = UniqueSlugify(to_lower=True)

downloaded_images = {}


def download_and_replace_markdown_images(markdown_source, to):
    def repl_and_download_image(match):
        image_alt = match.group(1)
        image_url = match.group(2)

        if not image_url.lower().startswith(
                "http://") and not image_url.lower().startswith("https://"):
            if image_url.startswith("/"):
                image_url = "https://zestedesavoir.com" + image_url
            else:
                click.echo(
Code example #28
def overview(hash, subjectSlug, topicSlug):
    topic = topicFromHash(hash)

    # If the subject and topic slugs for the given hash are incorrect, then
    # redirect the user to the link with the correct slugs.
    if topic.subject.slug != subjectSlug or topic.slug != topicSlug:
        return redirect(
            url_for('topic.overview',
                    hash=hash,
                    subjectSlug=topic.subject.slug,
                    topicSlug=topic.slug))

    # Get list of topics for validation to prevent duplicate names
    topics = [
        t for t in getTopics(topic.subject.id) if t['name'] != topic.name
    ]

    if (current_user.is_anonymous()
            or current_user.id != topic.subject.accountID):
        ownTopic = False
        user = getUser(topic.subject.accountID)

        # Show not found page if the user is set to private
        if not user.profile:
            abort(404)
    else:
        ownTopic = True
        user = current_user

    if current_user.is_anonymous():
        resultsDate = None
        resultsPercentage = None
    else:
        resultsDate = list(
            reversed(
                db.session.query(Result).filter_by(
                    accountID=current_user.id, topicID=topic.id).order_by(
                        Result.id.desc()).limit(7).all()))
        resultsPercentage = list(
            reversed(
                db.session.query(Result).filter_by(
                    accountID=current_user.id, topicID=topic.id).order_by(
                        Result.id.desc()).limit(7).all()))

        for result in resultsDate:
            result.date = str(result.date)[5:7] + '/' + str(result.date)[8:10]

    # Get last result, will return error if no last result is available
    try:
        lastResult = int(resultsPercentage[-2].percentage)
    except:
        lastResult = None

    if request.method == 'POST':
        error = False

        # Get form contents
        topicName = request.form['topicName']

        # Error validation is also server side in case someone maliciously
        # manipulates the client side Javascript validation
        if topicName.strip(' \t\n\r') == '':
            error = True
        if not bool(re.compile(r'.*[A-Za-z0-9].*').search(topicName)):
            error = True
        if topicName.lower() in (t['name'].lower() for t in topics):
            error = True

        questions = []
        i = 1

        while True:
            # Receive question from form if it exists, if not then end the loop
            try:
                question = request.form['question' + str(i)]
                answer = request.form['answer' + str(i)]
            except:
                break

            # Stop loop if question is empty
            if question == '':
                error = True
                break

            # Add question to list
            questions.append({
                'question': question,
                'answer': answer,
            })

            i += 1

        # Confirm there is at least one question and at most 99
        # (i ends one past the last question read from the form)
        if i < 2 or i > 100:
            error = True

        if not error:
            # Update topic name and slug if changed
            if topic.name != topicName:
                topic.name = topicName

                # Update slug of topic
                session['newTopicSubjectID'] = topic.subject.id
                topicSlug = UniqueSlugify(unique_check=uniqueTopicSlug,
                                          to_lower=True)
                topic.slug = topicSlug(topic.name)

            # Update last updated date of subject and topic
            topic.date = datetime.utcnow()
            topic.subject.date = datetime.utcnow()

            # Delete all the old questions of that topic
            db.session.query(Question).filter_by(topicID=topic.id).delete()

            # Add questions to database
            for q in questions:
                row = Question(topicID=topic.id,
                               question=q['question'],
                               answer=q['answer'])
                db.session.add(row)

            db.session.commit()

    # Get colors
    color = getColor(topic.subject.id)
    textcolor = color['text']
    iconcolor = color['icon']
    bgcolor = color['bg']
    navcolor = color['nav']

    # Get topic questions
    userQuestions = db.session.query(Question).filter_by(
        topicID=topic.id).join(Topic).join(Subject).filter(
            Subject.accountID == user.id).order_by(Question.id)
    questions = []
    count = 1
    for question in userQuestions:
        questions.append(
            dict(number=count,
                 question=question.question,
                 answer=question.answer))
        count += 1

    return render_template('topic/overview.html',
                           topic=topic,
                           ownTopic=ownTopic,
                           user=user,
                           questions=questions,
                           topics=topics,
                           resultsDate=resultsDate,
                           resultsPercentage=resultsPercentage,
                           textcolor=textcolor,
                           iconcolor=iconcolor,
                           bgcolor=bgcolor,
                           navcolor=navcolor)
Code example #29
File: models.py Project: twillis/cmsexp
 def _get_slug_from_tile(self, session):
     existing_slugs = [s[0] for s in session.query(self.__class__._slug).all()]
     slugify = UniqueSlugify(uids=existing_slugs)
     slugify.to_lower = True
     slugify.max_length = 1019
     return slugify(self.title)
Code example #30
File: slug.py Project: zoglesby/puny
from pecan import conf
from slugify import UniqueSlugify
from . import storage

import uuid
import maya


def unique_check(text, uids):
    permalink = conf.app.public_url + '/view/entry/' + text
    return storage.get_by_permalink(permalink, hidden=True) is None


slugify_unique = UniqueSlugify(unique_check=unique_check)


def generate_slug(mf2):
    seed = None

    props = mf2.get('properties', {})
    if 'name' in props:
        seed = props['name'][0]
    elif 'content' in props:
        if len(props['content']):
            for content in props['content']:
                if isinstance(content, dict):
                    if 'value' in content:
                        seed = content['value']
                    elif 'html' in content:
                        seed = content['html']
                elif isinstance(content, str):
Code example #31
    def get_absolute_url(self):
        return reverse('groups:group', args=[self.slug])

    def get_admins(self):
        return self.admins.all()

    def get_picture(self):
        default_picture = settings.STATIC_URL + 'img/cover.png'
        if self.cover:
            return self.cover.url
        else:
            return default_picture

    def recent_posts(self):
        return self.submitted_posts.filter(created__gte=timezone.now() -
                                           timedelta(days=3)).count()


def group_unique_check(text, uids):
    if text in uids:
        return False
    return not Group.objects.filter(slug=text).exists()


group_slugify = UniqueSlugify(unique_check=group_unique_check,
                              to_lower=True,
                              max_length=80,
                              separator='_',
                              capitalize=False)
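
With this configuration, a usage sketch (assuming no existing Group rows collide):

    group_slugify('My New Group!')   # -> 'my_new_group'
    group_slugify('My New Group!')   # -> 'my_new_group_1'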