示例#1
0
    def test_safe_chars(self):
        """Characters listed in ``safe_chars`` must survive slugification."""
        slug = Slugify()

        # Underscore is normally treated as a separator; marking it safe
        # keeps it intact.
        slug.safe_chars = '_'
        self.assertEqual(slug('test_sanitize'), 'test_sanitize')

        # An apostrophe produced by Cyrillic transliteration is kept too.
        slug.safe_chars = "'"
        self.assertEqual(slug('Конь-Огонь'), "Kon'-Ogon'")
示例#2
0
def revision(message):
    """Create a new migration revision file from *message*.

    The message is slugified into a lowercase, underscore-separated
    suffix, prefixed with a random hex number, and the rendered revision
    template is written under ``REVISIONS_PATH``.
    """
    mag_slugify = Slugify(to_lower=True)
    mag_slugify.separator = '_'
    message = mag_slugify(message)
    # hexlify() returns bytes on Python 3; decode so the filename does
    # not come out as "b'...'_<message>.py".
    number = binascii.hexlify(os.urandom(5)).decode('ascii')
    revision_filename = '{}_{}.py'.format(number, message)
    revision_body = template.body.format(number)
    revision_path = os.path.join(REVISIONS_PATH, revision_filename)
    # The rendered body is text, so open in text mode ('wb' would raise
    # TypeError on Python 3 when given a str).
    with open(revision_path, 'w') as revision_file:
        revision_file.write(revision_body)
    msg = 'Generating revision file {}...created'.format(revision_path)
    utils.message(msg)
示例#3
0
    def test_stop_words(self):
        """Stop words are removed only when they stand alone as words."""
        slug = Slugify(stop_words=['a', 'the'])

        # Matching is case-insensitive but applies to whole words only.
        self.assertEqual(slug('A red apple'), 'red-apple')
        self.assertEqual(slug('The4 red apple'), 'The4-red-apple')

        self.assertEqual(slug('_The_red_the-apple'), 'red-apple')
        self.assertEqual(slug('The__red_apple'), 'red-apple')

        # Safe characters delimit words and are preserved in the output.
        slug.safe_chars = '*'
        self.assertEqual(slug('*The*red*apple'), '*-*red*apple')
        self.assertEqual(slug('The**red*apple'), '**red*apple')

        # The stop-word list can be replaced after construction.
        slug.stop_words = ['x', 'y']
        self.assertEqual(slug('x y n'), 'n')
示例#4
0
    def __init__(self, target_case="lower", separator=" "):
        """Build the wrapped Slugify instance.

        target_case -- "lower" forces lowercase output; any other value
                       keeps the original case.
        separator   -- string inserted between words in the slug.
        """
        # Import lazily so the dependency is only required when this
        # class is actually instantiated.
        from slugify import Slugify

        lowercase = target_case == "lower"
        self.slug_ = Slugify(separator=separator, to_lower=lowercase)
示例#5
0
    def __init__(self):
        """Open the MySQL connection and prepare a cursor and a slugifier."""
        # NOTE(review): credentials and host are hard-coded here; they
        # should come from configuration.
        self.db = MySQLdb.connect(
            user='******', passwd='root', host='localhost', db='myanilist', charset='utf8', use_unicode=True
        )
        self.cursor = self.db.cursor()

        # Enforce UTF-8 for the connection.
        self.cursor.execute('SET NAMES utf8mb4')
        self.cursor.execute('SET CHARACTER SET utf8mb4')
        self.cursor.execute('SET character_set_connection=utf8mb4')

        # Slugify instance (lowercase output).
        self.slugger = Slugify(to_lower=True)
示例#6
0
 def test_pretranslate(self):
     """A pretranslate mapping replaces sequences before slugification."""
     emoji_to_word = {
         u'ʘ‿ʘ': u'smiling',
         u'ಠ_ಠ': u'disapproval',
         u'♥‿♥': u'enamored',
         u'♥': u'love',
         u'(c)': u'copyright',
         u'©': u'copyright',
     }
     slug = Slugify(pretranslate=emoji_to_word)
     # Each emoticon maps straight to its word equivalent.
     self.assertEqual(slug(u'ʘ‿ʘ'), u'smiling')
     self.assertEqual(slug(u'ಠ_ಠ'), u'disapproval')
     self.assertEqual(slug(u'(c)'), u'copyright')
     self.assertEqual(slug(u'©'), u'copyright')
示例#7
0
File: item.py — Project: Libisch/dbs-back
def create_slug(document, collection_name):
    """Build per-language slugs for *document*.

    Returns a dict mapping language code ('En'/'He') to a UTF-8 encoded
    slug of the form '<collection-prefix>_<lowercased header value>',
    or None when the document has no 'Header' key.
    """
    # Per-collection slug prefixes, keyed by language.
    collection_slug_map = {
        'places': {'En': 'place',
                   'He': u'מקום',
                  },
        'familyNames': {'En': 'familyname',
                        'He': u'שםמשפחה',
                       },
        'lexicon': {'En': 'lexicon',
                    'He': u'מלון',
                   },
        'photoUnits': {'En': 'image',
                       'He': u'תמונה',
                      },
        'photos': {'En': 'image',
                   'He': u'תמונה',
                  },
        # TODO: remove references to the genTreeIndividuals collection - it is irrelevant and not in use
        'genTreeIndividuals': {'En': 'person',
                               'He': u'אדם',
                              },
        'synonyms': {'En': 'synonym',
                     'He': u'שם נרדף',
                    },
        'personalities': {'En': 'luminary',
                          'He': u'אישיות',
                          },
        'movies': {'En': 'video',
                   'He': u'וידאו',
                  },
    }
    try:
        headers = document['Header'].items()
    except KeyError:
        # The persons collection is handled here, as the collection's docs
        # don't have a Header; it's the calling function's responsibility
        # to add a slug in that case.
        # TODO: refactor to more specific logic, instead of relying on them not having a Header
        return

    ret = {}
    # translate=None keeps non-Latin letters; '_' is the only safe char.
    slugify = Slugify(translate=None, safe_chars='_')
    for lang, val in headers:
        if val:
            collection_slug = collection_slug_map[collection_name].get(lang)
            if collection_slug:
                slug = slugify('_'.join([collection_slug, val.lower()]))
                ret[lang] = slug.encode('utf8')
    return ret
示例#8
0
    def form_valid(self, form):
        """Create the Hotel linked to this manager before saving the form.

        Forces the job title to 'MG', allocates a fresh property id, and
        creates the Hotel with a lowercase slug of its name.
        """
        form.instance.job_title = 'MG'

        property_id = create_unique_identifier()

        # Persist the identifier (presumably to guard against reuse —
        # confirm against create_unique_identifier's contract).
        PropertyIdentifier.objects.create(identifier=property_id)
        form.instance.property_id = property_id
        property_name = form.cleaned_data['works_for']
        c_slugify = Slugify(to_lower=True)
        property_slug = c_slugify(property_name)

        Hotel.objects.create(name=property_name,
                             slug=property_slug,
                             property_id=property_id)

        return super().form_valid(form)
示例#9
0
 def _add_slug(self, new_doc, title, lang):
     """Attach a 'slug_<lang>' key to *new_doc* built from *title*.

     The slug joins optional source and collection prefixes with the
     lowercased title using '_' (underscores inside each part are first
     rewritten to '-'). Does nothing when *title* is empty.
     """
     if title:
         collection = new_doc.get("collection", "")
         slug_parts = []
         # Pick the localized collection prefix, falling back to English.
         if collection in constants.SLUG_LANGUAGES_MAP:
             if lang in constants.SLUG_LANGUAGES_MAP[collection]:
                 slug_collection = constants.SLUG_LANGUAGES_MAP[collection][lang]
             else:
                 slug_collection = constants.SLUG_LANGUAGES_MAP[collection]["en"]
         else:
             slug_collection = None
         # Keep the source name unless this is a clearmash doc with a
         # known collection prefix in a supported language.
         if new_doc["source"] != "clearmash" or slug_collection is None or lang not in ["en", "he"]:
             slug_parts.append(new_doc["source"])
         if slug_collection:
             slug_parts.append(slug_collection)
         slug_parts.append(title.lower())
         # translate=None keeps unicode letters; '_' is the only safe char.
         slugify = Slugify(translate=None, safe_chars='_')
         slug = slugify(u'_'.join([p.replace("_", "-") for p in slug_parts]))
         new_doc["slug_{}".format(lang)] = slug
示例#10
0
def slugify(values, ensure_unique=False, **kwargs):
    """
    Given a sequence of strings, returns a standardized version of the sequence.
    If ``ensure_unique`` is True, any duplicate strings will be appended with
    a unique identifier. Any kwargs will be passed to the Slugify or
    UniqueSlugify class constructor

    See: https://github.com/dimka665/awesome-slugify
    """
    # Lowercase by default, but callers may override via kwargs.
    options = dict(to_lower=True)
    options.update(kwargs)

    slug_cls = UniqueSlugify if ensure_unique else Slugify
    slugger = slug_cls(**options)

    return tuple(slugger(value) for value in values)
示例#11
0
 def init_prometheus(self):
     """Create Prometheus metrics for every entry in ``registers``.

     Each register's short name is slugified (lowercase, '_' separator,
     with '+'/'-' spelled out) into a metric name; numeric datatypes
     become Gauges, everything else becomes Info metrics.
     """
     self.slugify = Slugify(to_lower=True)
     self.slugify.separator = "_"
     # '+' and '-' would otherwise be dropped from metric names.
     self.slugify.pretranslate = {"+": "plus", "-": "minus"}
     for n, r in enumerate(registers):
         s = self.slugify(f"empro_{r['short_name']}")
         registers[n]["slug"] = s
         # A duplicate slug would silently overwrite a metric; report it.
         if s in self.metrics:
             print(s, r)
         if r["datatype"] in [
                 "uint8",
                 "uint16",
                 "sint16",
                 "uint32",
                 "sint32",
                 "fl32",
         ]:
             self.metrics[s] = Gauge(s, r["description"],
                                     ["unit", "register"])
         else:
             self.metrics[s] = Info(s, r["description"], ["register"])
示例#12
0
def ajouter_article():
    """Flask view: show the add-article form and create the post on submit."""

    title = "Ajouter article"
    # Form used to collect the article's data.
    form = AjouterArticleForm()
    # Form was submitted and validated.
    if form.validate_on_submit():
        # NOTE(review): posts without a picture fall through silently and
        # just re-render the form — confirm this is intended.
        if form.picture.data:
            # Persist the post's data.
            titre_cap = form.titre.data
            titre_slugify = Slugify(to_lower=True)
            imagefile_thumb = save_picture_thumb(form.picture.data)
            post = Contenu(titre=titre_cap.capitalize(),
                           cont=form.cont.data,
                           thumb=imagefile_thumb,
                           slug=titre_slugify(form.titre.data),
                           rub_cont=form.rubrique.data,
                           cont_user=current_user)
            db.session.add(post)
            db.session.commit()
            flash('ajouter reussie', 'success')
            return redirect(url_for('posts.tous_articles'))
    return render_template('posts/ajouterpost.html', title=title, form=form)
示例#13
0
#!/usr/bin/env python
from math import isclose
from numbers import Number
import logging

# awesome-slugify is an optional dependency: warn instead of crashing
# at import time when it is missing.
try:
    from slugify import Slugify
    custom_slugify = Slugify(to_lower=True, separator='_')
except ImportError:
    # NOTE(review): custom_slugify stays undefined on this path, so any
    # later use raises NameError. Fixed typo: "pacakge" -> "package".
    print("You need the awesome-slugify package to run the importing code")

from adios_db.models.oil.oil import Oil
from adios_db.data_sources.mapper import MapperBase

logger = logging.getLogger(__name__)


class EnvCanadaRecordMapper(MapperBase):
    """
    A translation/conversion layer for the Environment Canada imported
    record object.
    This is intended to be used interchangeably with either an Environment
    Canada record or record parser object.  Its purpose is to generate
    named attributes that are suitable for creation of a NOAA Oil Database
    record.
    """
    def __init__(self, record):
        """
        :param record: A parsed object representing a single oil or
                       refined product.
        :type record: A record parser object.
示例#14
0
#
#     This program is distributed in the hope that it will be useful,
#     but WITHOUT ANY WARRANTY; without even the implied warranty of
#     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#     GNU General Public License for more details.
#
#     You should have received a copy of the GNU General Public License
#     along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import os
from urllib.parse import urlparse, parse_qs
from itertools import product
from slugify import Slugify

# Module-wide slugifier: lowercase output, underscores preserved.
custom_slugify = Slugify(to_lower=True)
custom_slugify.safe_chars = '_'

# Desktop Chrome user-agent string sent with scraping requests.
HEADERS = {
    'user-agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/45.0.2454.101 Safari/537.36'),
}


def _state_enrollment_url_list(output_dir):
    """One off method for dealing with non-standard format of state-level enrollment data"""
    var_map = {
        '1': 'grade-by-gender',
        '2': 'race_ethnicity-by-gender',
        '3': 'race_ethnicity-by-special-education',
示例#15
0
#
# Description: Read file geodatabase, create tables for subtypes and domains
#              Prepare sql scripts for indexes and foreign key constraints
# Author: George Ioannou
# Copyright: Cartologic 2017-2020
#
##
from os import getcwd, mkdir, path

from ruamel.yaml import YAML
from slugify import Slugify

import arcpy
import sys

slugify = Slugify(translate=None)


class YAMLObject(YAML):
    """ruamel YAML serializer preconfigured for UTF-8 unicode output."""

    def __init__(self):
        YAML.__init__(self)
        # Emit non-ASCII characters directly instead of escaping them.
        self.allow_unicode = True
        self.encoding = 'utf-8'


yaml = YAMLObject()


class FileGDB:
    def __init__(self, workspace, a_srs):
        self.workspace = workspace
from slugify import slugify, Slugify
import re
import math


def getSoupFromUrl(url):
    """Fetch *url* and return a BeautifulSoup tree, or "Failed" on error."""
    result = requests.get(url)
    if result.status_code == 200:
        print 'Request successful'
        return BeautifulSoup(result.text, "html.parser")
    else:
        print 'Request failed', url
        # NOTE(review): callers must check for this sentinel string.
        return "Failed"


slugify_normalize = Slugify()
slugify_normalize.safe_chars = ',./'


def normalize(s):
    """Slugify *s* with spaces between words; ',' '.' '/' are kept
    (see the safe_chars set on slugify_normalize)."""
    return slugify_normalize(s, separator=' ')


url = 'http://courses.monoprix.fr'
result = pd.DataFrame(columns=[
    "enseigne", "url", "nom_produit", "marque", "quantite", "poids_volume",
    "poids_volume_total", "unite", "descriptif", "ingredients", "conservation",
    "valeur_energetique", "origine", "prix", "prix_au_poids"
])
result = result.fillna(0)
示例#17
0
 def autoslug(self):
     """Lowercase slug of ``self.name``; the Esperanto letters ĉ ĝ ĥ ĵ ŝ
     are transliterated to the h-system, and '--' is returned when the
     name slugifies to an empty string."""
     esperanto = {'ĉ': 'ch', 'ĝ': 'gh', 'ĥ': 'hh', 'ĵ': 'jh', 'ŝ': 'sh'}
     make_slug = Slugify(to_lower=True, pretranslate=esperanto)
     return make_slug(self.name) or '--'
示例#18
0
 def test_fold_abbr_3(self):
     """Dotted abbreviations collapse even with trailing text present."""
     slug = Slugify(fold_abbrs=True)
     expected = 'Back-in-USSR-Text'
     self.assertEqual(expected, slug('Back in U.S.S.R. () Text'))
示例#19
0
 def test_fold_abbr(self):
     """fold_abbrs joins dotted abbreviations into one token."""
     slug = Slugify(fold_abbrs=True)
     # With and without the final dot, the result is the same.
     self.assertEqual('Back-in-USSR', slug('Back in U.S.S.R.'))
     self.assertEqual('Back-in-USSR', slug('Back in U.S.S.R'))
import pandas as pd
import numpy as np
from slugify import slugify, Slugify
import re
import math

def getSoupFromUrl(url):
    """Fetch *url* and return a BeautifulSoup tree, or "Failed" on error."""
    result = requests.get(url)
    if result.status_code == 200:
        print 'Request successful'
        return BeautifulSoup(result.text, "html.parser")
    else:
        print 'Request failed', url
        # NOTE(review): callers must check for this sentinel string.
        return "Failed"

slugify_normalize = Slugify()
slugify_normalize.safe_chars = ',./'
def normalize(s):
    """Slugify *s* with spaces between words; ',' '.' '/' are kept
    (see the safe_chars set on slugify_normalize)."""
    return slugify_normalize(s, separator=' ')

url = 'http://courses.monoprix.fr'
result = pd.DataFrame(columns=["enseigne", "url", "nom_produit", "marque", "quantite", "poids_volume", "poids_volume_total", "unite", "descriptif", "ingredients", "conservation","valeur_energetique", "origine", "prix", "prix_au_poids"])
result = result.fillna(0)


# file to write url that did'nt work properly
with open("Produits_Problem_5.txt", "a") as f_w_pb:
    # file with urls to scrap
    n = 0
    for line in open('Produits_Problem_4.txt', 'r'):
示例#21
0
from __future__ import (absolute_import, division, print_function, unicode_literals)

import inspect
import os
import os.path

try:
    # Python 3.x
    from urllib.parse import urlsplit
except ImportError:
    # Python 2.x
    from urlparse import urlsplit

from slugify import Slugify

# Filename slugifier: lowercase, underscore-separated; '-' and '.' are
# kept (preserving extensions) and the result is capped at 255 chars,
# a common filesystem name-length limit.
slugify_filename = Slugify(to_lower=True)
slugify_filename.separator = '_'
slugify_filename.safe_chars = '-.'
slugify_filename.max_length = 255


def get_safe_path_name(filename):
    """
    Return *filename* slugified into a filesystem-safe name.

    :type filename: unicode
    :rtype: unicode
    """
    # slugify_filename already lowercases, but the explicit lower() call
    # is kept to match the original contract exactly.
    return slugify_filename(filename).lower()

示例#22
0
import abc
import re
from uuid import uuid4

import requests
from django.conf import settings

from kittenteach.core.utils.utils import singleton, get_client_ip
from slugify import Slugify

c_slugify = Slugify(separator='_', to_lower=True)


class ABClient(abc.ABC):
    """Abstract client interface: participate/convert plus cookie handling.

    Concrete subclasses implement all four hooks; ``cookie_fmt`` is the
    format string for per-client cookie names.
    """

    # Format string for per-client cookie names.
    cookie_fmt = 'client_id_{}'

    @abc.abstractmethod
    def participate(self, *args):
        """Register the client's participation."""
        pass

    @abc.abstractmethod
    def convert(self, *args):
        """Register a conversion for the client."""
        pass

    @abc.abstractmethod
    def check_cookie(self, *args):
        """Check for the client-identifying cookie."""
        pass

    @abc.abstractmethod
    def set_cookie(self, *args):
        """Set the client-identifying cookie."""
        pass
示例#23
0
 def test_wrong_argument_type(self):
     """A set passed as pretranslate (not a mapping/callable) must raise."""
     with self.assertRaises(ValueError):
         Slugify(pretranslate={1, 2})
示例#24
0
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.html import urlize
from django.utils.text import Truncator
from etc.models import InheritedModelMetaclass
from siteflags.models import ModelWithFlag
from slugify import Slugify, CYRILLIC

from ..integration.base import RemoteSource
from ..integration.utils import get_image_from_url
from ..signals import sig_entity_new, sig_entity_published, sig_support_changed
from ..utils import UTM, TextCompiler, BasicTypograph

USER_MODEL: str = getattr(settings, 'AUTH_USER_MODEL')
SLUGIFIER = Slugify(pretranslate=CYRILLIC, to_lower=True, safe_chars='-._', max_length=200)


if False:  # pragma: nocover
    from .forms import CommonEntityForm
    from .realms import RealmBase
    from ..models import Category, User


class ModelWithAuthorAndTranslator(models.Model):
    """Класс-примесь для моделей, требующих поля с автором и переводчиком."""

    _hint_userlink: str = (
        '<br><b>[u:<ид>:<имя>]</b> формирует ссылку на профиль пользователя pythonz. Например: [u:1:идле].')

    author = models.CharField(
示例#25
0
            if metadata.has_key(TITLE_META_NAME) and \
                    metadata.has_key(AUTHOR_META_NAME) and metadata.has_key(ISBN_META_NAME):
                add_metadata(file_name, metadata, directory_name)
            else:
                os.rename(file_name, get_new_file_name_from_old_path(file_name))
        except Exception as err:
            os.rename(file_name, get_new_file_name_from_old_path(file_name))
            print "ERROR: File with errors ", file_name, err.__str__()


# Metadata key names (the '/…' forms are PDF-style metadata keys) and
# file-handling defaults.
TITLE_PROPERTY_NAME = "Title"
TITLE_META_NAME = "/Title"
AUTHOR_PROPERTY_NAME = "Authors"
AUTHOR_META_NAME = "/Author"
ISBN_PROPERTY_NAME = "ISBN"
ISBN_META_NAME = "/ISBN"
FILE_DEFAULT_EXTENSION = ".pdf"
DASH_SEPARATOR = " -- "
DEFAULT_SEPARATOR = " "
DEFAULT_OLD_FOLDER = "old/"
DEFAULT_PENDING_FOLDER = "Waiting4ManualCheck/"

# File-name slugifier: space-separated words, keeping '-' and '.'.
my_slugify = Slugify()
my_slugify.separator = DEFAULT_SEPARATOR
my_slugify.safe_chars = '-.'

# Script entry point (defined earlier in this file).
set_ebook_metadata()


示例#26
0
 def __init__(self):
     self.slug = Slugify(to_lower=True)
     self.slug.separator = ''
示例#27
0
 def test_prevent_double_pretranslation(self):
     slugify = Slugify(pretranslate={'s': 'ss'})
     self.assertEqual(slugify('BOOST'), 'BOOSST')
示例#28
0
from geonode.security.perm_utils import has_direct_or_group_perm

DEFAULT_TITLE = ""
DEFAULT_ABSTRACT = ""

INVALID_PERMISSION_MESSAGE = _("Invalid permission level.")

ALPHABET = string.ascii_uppercase + string.ascii_lowercase + \
    string.digits + '-_'
ALPHABET_REVERSE = dict((c, i) for (i, c) in enumerate(ALPHABET))
BASE = len(ALPHABET)
SIGN_CHARACTER = '$'

http_client = httplib2.Http()

custom_slugify = Slugify(separator='_')


def _get_basic_auth_info(request):
    """
    grab basic auth info
    """
    meth, auth = request.META['HTTP_AUTHORIZATION'].split()
    if meth.lower() != 'basic':
        raise ValueError
    username, password = base64.b64decode(auth).split(':')
    return username, password


def batch_permissions(request):
    # TODO
示例#29
0
# coding=utf-8
"""**Utilities functions**
"""

__author__ = 'Ismail Sunni <*****@*****.**>'
__revision__ = '$Format:%H$'
__date__ = '23/04/2014'
__license__ = ''
__copyright__ = ''


from slugify import Slugify

# Slugifier that keeps dots intact — presumably for version strings
# like "1.2.3" (the name suggests so; confirm against callers).
version_slugify = Slugify()
version_slugify.safe_chars = '.'
示例#30
0
import sys
import time
import urllib
import traceback
import threading
import json
import shutil
import errno
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
try:
    from slugify import Slugify
except ImportError:
    from octoprint.vendor.awesome_slugify import Slugify
_SLUGIFY = Slugify()
_SLUGIFY.safe_chars = "-_.()[] "

# create the module level logger
from octoprint_octolapse.log import LoggingConfigurator
logging_configurator = LoggingConfigurator()
logger = logging_configurator.get_logger(__name__)

from threading import Timer
FLOAT_MATH_EQUALITY_RANGE = 0.0000001

def get_float(value, default):
    if value is None:
        return default
    try:
        return float(value)
示例#31
0
File: slugifier.py — Project: caervs/blot
 def __init__(self, source_attr='title'):
     """Remember which attribute to slugify and build a lowercase slugifier."""
     super(Slugifier, self).__init__()
     self.source_attr = source_attr
     self.slugifier = Slugify(to_lower=True)
示例#32
0
 def test_pretranslate_lambda(self):
     """pretranslate may be any callable applied to the raw value."""
     reversing_slug = Slugify(pretranslate=lambda value: value[::-1])
     self.assertEqual(reversing_slug('slug'), 'guls')
示例#33
0
from collections import defaultdict

from slugify import Slugify

custom_slugify = Slugify(to_lower=True)
custom_slugify.separator = '_'


def get_oil_column_indexes(xl_sheet):
    '''
        This is tailored to parse the data format of the Excel spreadsheet of
        oil properties that was given to NOAA by Environment Canada (2017).

        Each single oil is represented in the spreadsheet by a contiguous
        group of columns, and only the first column contains the name of
        the oil.

        Return a dict with oil names as keys and a list of associated
        column indexes as values
    '''
    col_headers = defaultdict(list)
    col_prev_name = None

    for idx, col in enumerate(xl_sheet.columns):
        if idx >= 2:
            if col[0].value is not None:
                col_value = col[0].value.strip()

                col_headers[col_value].append(idx)
                col_prev_name = col_value
示例#34
0
import io
import logging
import os.path as op
from collections import defaultdict

from bioservices import KEGG
from slugify import Slugify

import ssbio.utils
from ssbio.protein.sequence.seqprop import SeqProp

log = logging.getLogger(__name__)
custom_slugify = Slugify(safe_chars='-_')
bs_kegg = KEGG()


class KEGGProp(SeqProp):
    def __init__(self, seq, id, fasta_path=None, txt_path=None, gff_path=None):
        """Create a KEGG-backed sequence property.

        Args:
            seq: Sequence passed through to SeqProp.
            id: KEGG identifier; also stored on ``self.kegg``.
            fasta_path: Path to the FASTA sequence file, if any.
            txt_path: Path to the KEGG metadata text file, if any.
            gff_path: Path to the GFF feature file, if any.
        """
        SeqProp.__init__(self,
                         seq=seq,
                         id=id,
                         sequence_path=fasta_path,
                         metadata_path=txt_path,
                         feature_path=gff_path)
        # Keep the KEGG id separately from the generic SeqProp id.
        self.kegg = id

    @SeqProp.metadata_path.setter
    def metadata_path(self, m_path):
        """Provide pointers to the paths of the metadata file

        Args:
示例#35
0
    def name_slug(self):
        """Lowercase slug of ``self.name``; '+', '.' and 'i' are kept as-is."""
        slugger = Slugify(to_lower=True)
        slugger.safe_chars = '+.ii'
        return slugger(self.name)
示例#36
0
    def test_only_stop_words_text(self):
        """Text made up solely of stop words is kept rather than emptied."""
        slug = Slugify(stop_words=['a', 'the'])

        self.assertEqual(slug('The A'), 'The-A')
示例#37
0
    def model_slug(self):
        """Lowercase slug of ``self.model``; '+', '.' and 'i' are kept as-is."""
        slugger = Slugify(to_lower=True)
        slugger.safe_chars = '+.ii'
        return slugger(self.model)
示例#38
0
    def test_to_lower_arg(self):
        """A per-call to_lower argument overrides the instance setting."""
        slug = Slugify()
        slug.to_lower = True

        self.assertEqual(slug('Test TO lower'), 'test-to-lower')
        self.assertEqual(slug('Test TO lower', to_lower=False), 'Test-TO-lower')
import json
from data import PROBLEM_SUBJECTS

from slugify import Slugify

# Lowercase, underscore-separated slugs used as JSON keys.
slug = Slugify(to_lower=True)
slug.separator = '_'

# Re-key PROBLEM_SUBJECTS: slugified category and skill names become the
# dict keys, with the human-readable originals kept as 'short_name'.
problems = {
   k : {
        'short_name': v['short_name'],
        'categories': {
            slug(kc): {
                'short_name': kc,
                'skills': {
                    slug(skill): { 
                        'short_name': skill, 
                    } for skill in vc 
                }
            } for kc, vc in v['categories'].items()
        }
    } for k, v in PROBLEM_SUBJECTS.items()
}

# Write the restructured data out as pretty-printed JSON.
with open('data.json', 'w') as f:
    f.write(json.dumps(problems, indent=4))