Example #1
import binascii
import os

from slugify import Slugify

# REVISIONS_PATH, template and utils come from the surrounding module (not shown here).


def revision(message):
    mag_slugify = Slugify(to_lower=True)
    mag_slugify.separator = '_'
    message = mag_slugify(message)
    # hexlify() returns bytes; decode so the filename stays a plain str
    number = binascii.hexlify(os.urandom(5)).decode('ascii')
    revision_filename = '{}_{}.py'.format(number, message)
    revision_body = template.body.format(number)
    revision_path = os.path.join(REVISIONS_PATH, revision_filename)
    with open(revision_path, 'w') as revision_file:
        revision_file.write(revision_body)
    msg = 'Generating revision file {}...created'.format(revision_path)
    utils.message(msg)
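
A minimal usage sketch (not part of the original project) showing what this slugifier configuration yields for a typical revision message:

from slugify import Slugify

mag_slugify = Slugify(to_lower=True)
mag_slugify.separator = '_'

print(mag_slugify('Add users table'))  # -> 'add_users_table'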
Example #2
from collections import defaultdict

from slugify import Slugify

custom_slugify = Slugify(to_lower=True)
custom_slugify.separator = '_'


def get_oil_column_indexes(xl_sheet):
    '''
        This is tailored to parse the data format of the Excel spreadsheet of
        oil properties that was given to NOAA by Environment Canada (2017).

        Each single oil is represented in the spreadsheet by a contiguous
        group of columns, and only the first column contains the name of
        the oil.

        Return a dict with oil names as keys and a list of associated
        column indexes as values
    '''
    col_headers = defaultdict(list)
    col_prev_name = None

    for idx, col in enumerate(xl_sheet.columns):
        if idx >= 2:
            if col[0].value is not None:
                col_value = col[0].value.strip()

                col_headers[col_value].append(idx)
                col_prev_name = col_value
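
The example is cut off before unnamed columns are handled. A minimal sketch of how the fill-forward behaviour described in the docstring might be completed (the function name and exact handling here are assumptions, not the original code):

from collections import defaultdict


def get_oil_column_indexes_sketch(xl_sheet):
    # Hypothetical completion: columns whose header cell is empty are
    # attributed to the most recently seen oil name (fill-forward).
    col_headers = defaultdict(list)
    col_prev_name = None

    for idx, col in enumerate(xl_sheet.columns):
        if idx >= 2:
            if col[0].value is not None:
                col_prev_name = col[0].value.strip()
            if col_prev_name is not None:
                col_headers[col_prev_name].append(idx)

    return col_headers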
Example #3
import inspect
import os
import os.path

try:
    # Python 3.x
    from urllib.parse import urlsplit
except ImportError:
    # Python 2.x
    from urlparse import urlsplit

from slugify import Slugify

slugify_filename = Slugify(to_lower=True)
slugify_filename.separator = '_'
slugify_filename.safe_chars = '-.'
slugify_filename.max_length = 255


def get_safe_path_name(filename):
    """
    :type filename: unicode
    :rtype: unicode
    """
    safe_filename = slugify_filename(filename)

    return safe_filename.lower()


def get_filename_from_url(url):
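    # --- Hypothetical continuation (the original body is cut off here) ---
    # Assumption: take the last path segment of the URL and slugify it
    # with get_safe_path_name defined above.
    path = urlsplit(url).path
    return get_safe_path_name(os.path.basename(path))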
Example #4
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from slugify import Slugify

my_slugify = Slugify()
my_slugify.separator = '-'
my_slugify.pretranslate = {'&': 'and'}
my_slugify.to_lower = True
my_slugify.max_length = None
my_slugify.capitalize = False
my_slugify.safe_chars = ''


def add_slug(CategoryModel, category_name, category_group, category_code,
             category_description):
    category = CategoryModel.objects.get(code=category_code)
    category.slug = my_slugify(category_name)
    category.save()


def create_category_slugs(apps, schema_editor):
    CategoryModel = apps.get_model("core", "ChCategory")
    print("CategoryModel: ", CategoryModel)

    # Art & cultural events // Arte y eventos culturales
    add_slug(CategoryModel, 'Art & Cultural events', 'Art & Cultural events',
             '01.01', 'Dummy description')

    # Books & Comics // Libros y cómics
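
The migration is cut off after the first category. In a typical Django data migration, create_category_slugs would then be registered via RunPython; a minimal sketch (the dependency below is hypothetical, not taken from the original app):

class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),  # hypothetical previous migration
    ]

    operations = [
        migrations.RunPython(create_category_slugs),
    ]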
Example #5
from bs4 import BeautifulSoup
import requests
import os
from utilities import endpoints, utils
from user_agent import generate_user_agent
from slugify import slugify, Slugify
import sys
import re
_slugify = Slugify(to_lower=True)
_slugify.separator = '_'

headers = {
    'User-Agent': generate_user_agent(os=None, navigator=None, platform=None, device_type=None),
    'From': '*****@*****.**'
}


def _team_page_scrape():
    page = requests.get(endpoints.teams_endpoint(), headers=headers)
    page.encoding = 'utf-8'
    soup = BeautifulSoup(page.text, "html.parser")
    return soup


# manually add in dif sizes for imgs
# takes url and index to choose size from list
def _change_team_img_size(src, list_index):
    # replace scraped img size with one the sizes below
    regex = "image[\d]+x[\d]+.img.[\d]+.(medium|small|large)"
    sizes = ['320', '640', '768', '1536']
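
A minimal usage sketch (not part of the original scraper) showing what the module-level _slugify produces for a scraped team name:

from slugify import Slugify

_slugify = Slugify(to_lower=True)
_slugify.separator = '_'

print(_slugify('Golden State Warriors'))  # -> 'golden_state_warriors'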
Example #6
def save(self, *args, **kwargs):
    if not self.slug:
        custom_slugify = Slugify(to_lower=True)
        custom_slugify.separator = '_'
        self.slug = custom_slugify(str(self.name))
    super().save(*args, **kwargs)
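
This override assumes a model with name and slug fields. A minimal self-contained sketch of how it might sit in a model (the model name and field definitions are assumptions, not the original code):

from django.db import models
from slugify import Slugify


class Team(models.Model):  # hypothetical model
    name = models.CharField(max_length=100)
    slug = models.SlugField(max_length=100, blank=True)

    def save(self, *args, **kwargs):
        if not self.slug:
            custom_slugify = Slugify(to_lower=True)
            custom_slugify.separator = '_'
            self.slug = custom_slugify(str(self.name))
        super().save(*args, **kwargs)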
Example #7
            if TITLE_META_NAME in metadata and \
                    AUTHOR_META_NAME in metadata and ISBN_META_NAME in metadata:
                add_metadata(file_name, metadata, directory_name)
            else:
                os.rename(file_name, get_new_file_name_from_old_path(file_name))
        except Exception as err:
            os.rename(file_name, get_new_file_name_from_old_path(file_name))
            print("ERROR: File with errors ", file_name, str(err))


TITLE_PROPERTY_NAME = "Title"
TITLE_META_NAME = "/Title"
AUTHOR_PROPERTY_NAME = "Authors"
AUTHOR_META_NAME = "/Author"
ISBN_PROPERTY_NAME = "ISBN"
ISBN_META_NAME = "/ISBN"
FILE_DEFAULT_EXTENSION = ".pdf"
DASH_SEPARATOR = " -- "
DEFAULT_SEPARATOR = " "
DEFAULT_OLD_FOLDER = "old/"
DEFAULT_PENDING_FOLDER = "Waiting4ManualCheck/"

my_slugify = Slugify()
my_slugify.separator = DEFAULT_SEPARATOR
my_slugify.safe_chars = '-.'

set_ebook_metadata()
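
A minimal usage sketch (not part of the original script): with a space separator and '-' and '.' kept as safe characters, punctuation such as commas and parentheses is dropped while dots and dashes survive:

from slugify import Slugify

my_slugify = Slugify()
my_slugify.separator = ' '
my_slugify.safe_chars = '-.'

print(my_slugify('Learning Python, 5th Ed. (2013)'))  # roughly 'Learning Python 5th Ed. 2013'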


Example #8
def to_slugify(text):
    from slugify import Slugify
    c__slugify = Slugify(to_lower=True)
    c__slugify.separator = '_'

    return c__slugify(text)


import json
from data import PROBLEM_SUBJECTS

from slugify import Slugify

slug = Slugify(to_lower=True)
slug.separator = '_'

problems = {
    k: {
        'short_name': v['short_name'],
        'categories': {
            slug(kc): {
                'short_name': kc,
                'skills': {
                    slug(skill): {
                        'short_name': skill,
                    } for skill in vc
                }
            } for kc, vc in v['categories'].items()
        }
    } for k, v in PROBLEM_SUBJECTS.items()
}

with open('data.json', 'w') as f:
    f.write(json.dumps(problems, indent=4))
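
PROBLEM_SUBJECTS itself is not shown. A minimal sketch of the input shape this comprehension assumes and the kind of structure it emits (the sample data is hypothetical):

# Hypothetical input shape:
PROBLEM_SUBJECTS = {
    'math': {
        'short_name': 'Math',
        'categories': {
            'Number Theory': ['Modular Arithmetic', 'Prime Factorization'],
        },
    },
}

# The comprehension above would then produce roughly:
# {
#     "math": {
#         "short_name": "Math",
#         "categories": {
#             "number_theory": {
#                 "short_name": "Number Theory",
#                 "skills": {
#                     "modular_arithmetic": {"short_name": "Modular Arithmetic"},
#                     "prime_factorization": {"short_name": "Prime Factorization"}
#                 }
#             }
#         }
#     }
# }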