    def pre_save(self, instance, add):
        # get currently entered slug
        value = self.value_from_object(instance)

        # autopopulate (unless the field is editable and has some value)
        # SNOWY: put self.populate_from before value so that slug is always updated
        slug = None
        if self.populate_from: # and not self.editable:
            slug = slugify(getattr(instance, self.populate_from))
        elif value:
            slug = slugify(value)

        if not slug:
            # no incoming value, use model name
            slug = instance._meta.module_name

        assert slug, 'slug is defined before trying to ensure uniqueness'

        # ensure the slug is unique (if required)
        if self.unique or self.unique_with:
            slug = self._generate_unique_slug(instance, slug)

        assert slug, 'value is filled before saving'

        setattr(instance, self.name, slug) # XXX do we need this?

        return slug
Example #2
    def pre_save(self, instance, add):
        # get currently entered slug
        value = self.value_from_object(instance)

        # autopopulate (unless the field is editable and has some value)
        # SNOWY: put self.populate_from before value so that slug is always updated
        slug = None
        if self.populate_from:  # and not self.editable:
            slug = slugify(getattr(instance, self.populate_from))
        elif value:
            slug = slugify(value)

        if not slug:
            # no incoming value, use model name
            slug = instance._meta.module_name

        assert slug, 'slug is defined before trying to ensure uniqueness'

        # ensure the slug is unique (if required)
        if self.unique or self.unique_with:
            slug = self._generate_unique_slug(instance, slug)

        assert slug, 'value is filled before saving'

        setattr(instance, self.name, slug)  # XXX do we need this?

        return slug
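For context, a minimal model sketch (hypothetical names, assuming django-autoslug's AutoSlugField) of the kind of declaration whose save path ends up in a pre_save like the one above:

from django.db import models
from autoslug import AutoSlugField

class Article(models.Model):
    title = models.CharField(max_length=200)
    # with populate_from set, the patched pre_save above rebuilds the slug
    # from title on every save
    slug = AutoSlugField(populate_from='title', unique=True)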
Example #3
    def _dup_categories_slugs(self, categories):
        # use a counter to differentiate duplicates; assumes the list is
        # ordered so that categories with the same name are adjacent
        counter = 2
        for idx, category in enumerate(categories):
            if idx == 0:
                category.slug = slugify(category.name)
            else:
                if categories[idx - 1].name.lower() == category.name.lower():
                    category.slug = slugify(category.name) + '-' + str(counter)
                    counter += 1
                else:
                    counter = 2
                    category.slug = slugify(category.name)
        return categories
Example #4
    def _dup_categories_slugs(self, categories):
        # use a counter to differentiate them
        counter = 2
        for idx, category in enumerate(categories):
            if idx == 0:
                category.slug = slugify(category.name)
            else:
                if categories[idx - 1].name.lower() == category.name.lower():
                    category.slug = slugify(category.name) + '-' + str(counter)
                    counter += 1
                else:
                    counter = 2
                    category.slug = slugify(category.name)
        return categories
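A hedged usage sketch for the helper above; Category and the importer instance are hypothetical, and the list is assumed to be ordered by name so that duplicates sit next to each other (the previous-item comparison misses non-adjacent duplicates otherwise):

# hypothetical call site: sort first so equal names end up adjacent
categories = sorted(Category.objects.all(), key=lambda c: c.name.lower())
categories = importer._dup_categories_slugs(categories)
# e.g. names ["News", "news", "Sports"] -> slugs "news", "news-2", "sports"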
Example #5
def add(request, app_label, model, object_id):

    content_type = get_object_or_404(ContentType, app_label=app_label,
                                     model=model)
    model = content_type.model_class()
    object_id = int(object_id)

    item = get_object_or_404(model, id=object_id)
    user = request.user

    new_tags = []

    if request.method == "POST":
        tags = [t.strip() for t in request.POST.get("tags", u"").split(u",")]
        for tag in tags:
            if not item.tags.filter(user=user, slug=slugify(tag)).count():
                tag = Tag(content_type=content_type, object_id=object_id,
                    user=user, name=tag)
                tag.save()
                new_tags.append(tag)

    reindex(item)

    response = {"tags": []}
    for tag in new_tags:
        response["tags"].append(dict(name=tag.name,
                                     id=tag.id,
                                     url=reverse("materials:keyword_index",
                                                 kwargs={"keywords": tag.slug}),
                                     ))

    return response
Example #6
def export_feed(request, feed_id, all_items=False):
    feed = get_object_or_404(RSSFeed, id=int(feed_id))

    items = feed.items.all() if all_items else feed.items.filter(exported=False)

    out = StringIO()
    writer = csv.writer(out, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow(("CR_TITLE", "CR_URL", "CR_CREATE_DATE", "CR_ABSTRACT", "CR_KEYWORDS"))

    for row in items.values_list("title", "url", "date", "description", "keywords"):
        row = list(row)
        row[2] = row[2].date().isoformat() if row[2] else u""
        row = map(lambda x: unescape(x).encode("utf-8"), row)
        writer.writerow(row)

    feed_title = feed.title or "feed%s" % feed.id
    filename = "%s-%s.csv" % (slugify(feed_title), datetime.datetime.now().isoformat())

    out.seek(0)
    response = HttpResponse(out.read(), content_type="text/csv")
    response['Content-Disposition'] = 'inline;filename="%s"' % filename

    items.update(exported=True)

    return response
Example #7
    def save(self):
        """
        Parses and saves the publication to the database.
        """
        kwargs = self.fetch()

        # Fetch authors first to error early if we can't find the user.
        authors = [self.get_author(name) for name in kwargs.pop('authors', [])]

        # Get publication by slug or create it with the discovered attributes
        pub, crt = Publication.objects.update_or_create(
            slug=slugify(kwargs['title']), defaults=kwargs)

        # Add authors to the publication if any
        for author in authors:
            # Make sure the author has the correct role
            _ = Membership.objects.get_or_create(
                role=Role.objects.get(slug="blog-author"),
                profile=author.profile)

            # Add author to the publication
            if author not in pub.authors.all():
                pub.authors.add(author)

        return pub, crt
Example #8
    def feed_id(self):
        """
        Return a unique slug to serve as an Atom identifier (after being
        appended to the server URL).
        """
        return slugify(
            "%s %s %s" % (self.user.userid, self.published.strftime("%Y%m%d-%H%M%S"), self.id)
        )
Example #9
    def feed_id(self):
        """
        Return a unique slug to serve as an Atom identifier (after being
        appended to the server URL).
        """
        return slugify("%s %s %s" %
                       (self.user.userid,
                        self.published.strftime("%Y%m%d-%H%M%S"), self.id))
Example #10
def _make_slug(title):
    """Make a slug, avoiding "forbidden slug" titles and four-digit
    integers (to avoid clashing with years).
    """
    if title in constants.FORBIDDEN_SLUGS or _four_digit(title):
        title += constants.SLUG_MODIFIER
    return slugify(title)
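Illustrative calls (not from the source); constants.FORBIDDEN_SLUGS and constants.SLUG_MODIFIER are project-specific values not shown here, so only the shape of the behaviour is sketched:

_make_slug("My Article")  # -> "my-article", assuming a lowercasing slugify
_make_slug("2021")        # four-digit title: SLUG_MODIFIER is appended before slugifying
_make_slug("add")         # same treatment if "add" happens to be in FORBIDDEN_SLUGS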
Example #11
def csv_export(query, title):
    query.load_all()

    has_courses = False
    has_libraries = False
    has_community_items = False
    model = None

    for result in query:
        if result.model == Course:
            has_courses = True
        elif result.model == Library:
            has_libraries = True
        elif result.model == CommunityItem:
            has_community_items = True
        if has_courses and has_libraries and has_community_items:
            break

    if has_courses:
        fields = COURSE_FIELDS
        model = Course
    elif has_libraries:
        fields = LIBRARY_FIELDS
        model = Library
    elif has_community_items:
        fields = COMMUNITY_ITEM_FIELDS
        model = CommunityItem
    else:
        raise Http404()

    out = StringIO()
    writer = csv.writer(out, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerow([f[0] for f in fields])

    for result in query:
        if result.model != model:
            continue
        row = []
        object = result.object
        for field_name, attr_name, processor in fields:
            attr_value = getattr(object, attr_name)
            if not attr_value:
                # empty source value: emit an empty cell and move on
                row.append("")
                continue
            if processor is not None:
                attr_value = processor(attr_value)
            if not attr_value:
                # processor produced nothing usable
                row.append("")
                continue
            if isinstance(attr_value, unicode):
                attr_value = attr_value.encode("utf-8")
            row.append(attr_value)
        writer.writerow(row)

    title = slugify(title)
    if not title:
        title = 'export'

    out.seek(0)
    response = HttpResponse(out.read(), content_type="text/csv")
    response['Content-Disposition'] = 'inline;filename="%s.csv"' % title
    return response
Example #12
def forwards(apps, schema_editor):
    from autoslug.settings import slugify

    Deck = apps.get_model('flashcards', 'Deck')

    for deck in Deck.objects.all().iterator():
        deck.slug = slugify(deck.name)
        deck.save(update_fields=['slug'])
Example #13
def forwards(apps, schema_editor):
    from autoslug.settings import slugify

    Deck = apps.get_model('flashcards', 'Deck')

    for deck in Deck.objects.all().iterator():
        deck.slug = slugify(deck.name)
        deck.save(update_fields=['slug'])
Example #14
    def parse_row(self, row):
        existing = False

        pk = "name"                 
        try:
            pk_val = row.getElementsByTagName(pk)[0]
        except Exception as error:
            raise IndexError("%s %s" % (pk, error))

        try:
            pk_text = pk_val.childNodes[0].nodeValue
        except Exception as error:
            raise Exception("%s %s: Error Reading 'text' from 'pk': %s" % (pk, pk_val, error)) 

        slug = slugify(pk_text)
        
        try:
            hospital = self.get_model_class().objects.get(slug=slug)
            existing = True
        except ObjectDoesNotExist:
            hospital = self.get_model_class()(slug=slug)
            existing = False
        except MultipleObjectsReturned:
            raise ImportException("multiple objects returned with %s %s " % (pk, pk_val))

        hospital.name = pk_text
        

        try:
            point = row.getElementsByTagName("Point")[0]
        except Exception as error:
            raise IndexError("%s %s: Error Reading 'Point' from 'Placemark': %s" % (pk, pk_val, error)) 
        
        try:       
            coordinates = point.getElementsByTagName("coordinates")[0]
        except Exception as error:        
            raise Exception("%s %s: Error Reading 'coordinates' from 'Point': %s" % (pk, pk_val, error)) 
        
        try:
            coord_text = coordinates.childNodes[0].nodeValue
        except Exception as error:
            raise Exception("%s %s: Error Reading 'text' from 'coordinates': %s" % (pk, pk_val, error)) 
        
        try:
            longitude, latitude, other = coord_text.split(',')
        except Exception as error:
            raise Exception("%s %s: Error splitting 'text': %s" % (pk, pk_val, error))

        point = fromstr('POINT(%s %s)' % (longitude, latitude))
        hospital.point = point

        if existing:
            self.stats['existing'] += 1
        else:
            self.stats['new'] += 1
        print vars(hospital)
        return hospital
Example #15
def populate_names(apps, schema_editor):
    """Fix new slug name
    """
    from autoslug.settings import slugify
    Contact = apps.get_model('notebook', 'Contact')
    for contact in Contact.objects.all():
        contact.slug = slugify("%s %s" % (contact.first_name,
                                          contact.last_name))
        contact.save()
Example #16
    def set_translation(self, lang, field, text):
        """
        Store a translation string in the specified field for a Translatable
        istance

        @type lang: string
        @param lang: a string with the name of the language

        @type field: string
        @param field: a string with the name that we try to get

        @type text: string
        @param text: a string to be stored as translation of the field
        """
        # Do not allow user to set a translations in the default language
        auto_slug_obj = None

        if lang == self._get_default_language():
            raise CanNotTranslate(
                _('You are not supposed to translate the default language. '
                  'Use the model fields for translations in default language'))

        # Get translation; if it does not exist, create one
        trans_obj = self.get_translation_obj(lang, field, create=True)
        trans_obj.translation = text
        trans_obj.save()

        # check if the field has an autoslugfield and create the translation
        if INSTALLED_AUTOSLUG:
            if self.translatable_slug:
                try:
                    auto_slug_obj = self._meta.get_field(
                        self.translatable_slug).populate_from
                except AttributeError:
                    pass

        if auto_slug_obj:
            tobj = self.get_translation_obj(lang,
                                            self.translatable_slug,
                                            create=True)
            translation = self.get_translation(lang, auto_slug_obj)
            tobj.translation = slugify(translation)
            tobj.save()

        # Update cache for this specific translation
        key = self._get_translation_cache_key(lang, field)
        cache.set(key, text)
        # remove cache for translations dict
        cache.delete(self._get_translations_cache_key(lang))
        return trans_obj
Example #17
    def clean_name(self):
        name = self.cleaned_data['name']

        try:
            qs = self._meta.model.objects.filter(slug=slugify(name))

            if self.instance.pk:
                qs = qs.exclude(pk=self.instance.pk)

            qs.get()

        except self._meta.model.DoesNotExist:
            return name
        raise forms.ValidationError(self.error_messages['duplicate'])
Example #18
    def clean_name(self):
        name = self.cleaned_data['name']

        if self._topic:
            return name

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            try:
                Topic.objects.get(slug=slugify(name), forum=self._forum)
            except Topic.DoesNotExist:
                return name
            raise forms.ValidationError(self.error_messages['duplicate'])

        return name
Example #19
    def clean_name(self):
        name = self.cleaned_data['name']

        if self._topic:
            return name

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            try:
                Topic.objects.get(slug=slugify(name), forum=self._forum)
            except Topic.DoesNotExist:
                return name
            raise forms.ValidationError(self.error_messages['duplicate'])

        return name
Example #20
    def set_translation(self, lang, field, text):
        """
        Store a translation string in the specified field for a Translatable
        instance

        @type lang: string
        @param lang: a string with the name of the language

        @type field: string
        @param field: a string with the name of the field to translate

        @type text: string
        @param text: a string to be stored as translation of the field
        """
        # Do not allow the user to set a translation in the default language
        auto_slug_obj = None

        if lang == self._get_default_language():
            raise CanNotTranslate(
                _('You are not supposed to translate the default language. '
                  'Use the model fields for translations in default language')
            )

        # Get translation; if it does not exist, create one
        trans_obj = self.get_translation_obj(lang, field, create=True)
        trans_obj.translation = text
        trans_obj.save()

        # check if the field has an autoslugfield and create the translation
        if INSTALLED_AUTOSLUG:
            if self.translatable_slug:
                try:
                    auto_slug_obj = self._meta.get_field(self.translatable_slug).populate_from
                except AttributeError:
                    pass

        if auto_slug_obj:
            tobj = self.get_translation_obj(lang, self.translatable_slug, create=True)
            translation = self.get_translation(lang, auto_slug_obj)
            tobj.translation = slugify(translation)
            tobj.save()

        # Update cache for this specific translation
        key = self._get_translation_cache_key(lang, field)
        cache.set(key, text)
        # remove cache for translations dict
        cache.delete(self._get_translations_cache_key(lang))
        return trans_obj
Example #21
    def _image_to_picture(self, image_hash):
        src = image_hash['src']
        alt = image_hash['alt'] if image_hash['alt'] else ""
        name = src.split('/')[-1].split('.')[0]  # get rid of path and extension
        name = slugify(name)
        # we don't really care about image's slugs, and article_id can be useful
        slug = self._joomla_slugify(image_hash['article_id'], name)
        picture = Picture(
            image=src,
            description=alt,
            name=name,
            slug=slug,
            # creation_date = post['post_date']
        )
        return picture
Example #22
File: base.py Project: thoas/pybbm
    def clean_name(self):
        name = self.cleaned_data['name']

        slug = slugify(name)

        if slug in defaults.PYBB_FORBIDDEN_SLUGS:
            raise forms.ValidationError(self.error_messages['forbidden'])

        qs = self._meta.model.objects.filter(Q(slug=slug) | Q(name__iexact=name))

        if self.instance.pk:
            qs = qs.exclude(pk=self.instance.pk)

        if qs.exists():
            raise forms.ValidationError(self.error_messages['duplicate'])

        return name
Example #23
    def clean_name(self):
        name = self.cleaned_data['name']

        slug = slugify(name)

        if slug in defaults.PYBB_FORBIDDEN_SLUGS:
            raise forms.ValidationError(self.error_messages['forbidden'])

        qs = self._meta.model.objects.filter(
            Q(slug=slug) | Q(name__iexact=name))

        if self.instance.pk:
            qs = qs.exclude(pk=self.instance.pk)

        if qs.exists():
            raise forms.ValidationError(self.error_messages['duplicate'])

        return name
Example #24
    def clean(self):
        cleaned_data = self.cleaned_data

        redirection_type = cleaned_data['redirection_type']

        if (int(redirection_type) == TopicRedirection.TYPE_EXPIRING_REDIRECT and
                ('expired' not in cleaned_data or not cleaned_data['expired'])):
            self._errors['expired'] = self.error_class(
                [self.error_messages['expired_not_empty']])

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            forum = self.cleaned_data['forum']

            if 'name' in self.cleaned_data and self.cleaned_data['name']:
                name = self.cleaned_data['name']

                slug = slugify(name)

                try:
                    forum.topics.get(slug=slug)
                except Topic.DoesNotExist:
                    pass
                else:
                    self._errors['name'] = self.error_class([
                        self.error_messages['duplicate'] % {
                            'topic': name,
                            'forum': forum
                        }
                    ])
            else:
                try:
                    forum.topics.get(slug=self.topic.slug)
                except Topic.DoesNotExist:
                    pass
                else:
                    self._errors['name'] = self.error_class([
                        self.error_messages['duplicate'] % {
                            'topic': self.topic,
                            'forum': forum
                        }
                    ])

        return cleaned_data
Example #25
    def clean(self):
        cleaned_data = self.cleaned_data

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            forum = self.cleaned_data['forum']

            name = self.cleaned_data['name']

            slug = slugify(name)

            try:
                forum.topics.get(slug=slug)
            except Topic.DoesNotExist:
                pass
            else:
                self._errors['name'] = self.error_class([self.error_messages['duplicate'] % {
                    'topic': name,
                    'forum': forum
                }])

        return cleaned_data
Example #26
    def clean(self):
        cleaned_data = self.cleaned_data

        redirection_type = cleaned_data['redirection_type']

        if (int(redirection_type) == TopicRedirection.TYPE_EXPIRING_REDIRECT and
                ('expired' not in cleaned_data or not cleaned_data['expired'])):
            self._errors['expired'] = self.error_class([self.error_messages['expired_not_empty']])

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            forum = self.cleaned_data['forum']

            if 'name' in self.cleaned_data and self.cleaned_data['name']:
                name = self.cleaned_data['name']

                slug = slugify(name)

                try:
                    forum.topics.get(slug=slug)
                except Topic.DoesNotExist:
                    pass
                else:
                    self._errors['name'] = self.error_class([self.error_messages['duplicate'] % {
                        'topic': name,
                        'forum': forum
                    }])
            else:
                try:
                    forum.topics.get(slug=self.topic.slug)
                except Topic.DoesNotExist:
                    pass
                else:
                    self._errors['name'] = self.error_class([self.error_messages['duplicate'] % {
                        'topic': self.topic,
                        'forum': forum
                    }])

        return cleaned_data
Example #27
    def clean(self):
        cleaned_data = self.cleaned_data

        if not defaults.PYBB_DUPLICATE_TOPIC_SLUG_ALLOWED:
            forum = self.cleaned_data['forum']

            name = self.cleaned_data['name']

            slug = slugify(name)

            try:
                forum.topics.get(slug=slug)
            except Topic.DoesNotExist:
                pass
            else:
                self._errors['name'] = self.error_class([
                    self.error_messages['duplicate'] % {
                        'topic': name,
                        'forum': forum
                    }
                ])

        return cleaned_data
Example #28
import logging

from six import PY3

from django.core.files.base import File
from django.contrib.contenttypes.models import ContentType
from django.db.utils import DatabaseError
from django.utils.encoding import force_text

from autoslug.settings import slugify

logger = logging.getLogger(__name__)

# *****************************************************************************
# *****************************************************************************
# ********************************** General **********************************
# *****************************************************************************
# *****************************************************************************

do_slugify = lambda s: slugify(s.lower()).lower()


def safe_text(text):
    """
    Safe text (encode).

    :return str:
    """
    if PY3:
        return force_text(text, encoding='utf-8')
    else:
        return force_text(text, encoding='utf-8').encode('utf-8')


def lists_overlap(sub, main):
Example #29
    'lists_overlap',
    'iterable_to_dict',
    'uniquify_sequence',
    'clean_plugin_data',
    'clone_plugin_data',
    'update_plugin_data',
    'safe_text',
)

from six import PY3

from django.utils.encoding import force_text

from autoslug.settings import slugify

slugify_workspace = lambda s: slugify(s.lower()).lower()


def safe_text(text):
    """
    Safe text (encode).

    :return str:
    """
    if PY3:
        return force_text(text, encoding='utf-8')
    else:
        return force_text(text, encoding='utf-8').encode('utf-8')


def lists_overlap(sub, main):
Example #30
def slugify_workspace(s):
    return slugify(s.lower()).lower()
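A note on the double lower() used here and in the lambda variants elsewhere in this listing: autoslug.settings.slugify is pluggable (the project can point it at a custom function), so the outer lower() guards against a backend that preserves case. Illustrative call:

slugify_workspace("My Workspace")  # -> "my-workspace" whether or not the backend lowercases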
Example #31
def _generate_slug(error_post):
    return '{}-{}'.format(
        slugify(error_post.raised_by[7:-3].replace('/', '-')),
        slugify(error_post.exception_type))
Example #32
    def clean_name(self):
        name = self.cleaned_data['name'].strip()
        if Room.objects.filter(slug__exact=slugify(name)).count() > 0:
            # "A room with a similar name already exists."
            raise forms.ValidationError(u"Místnost s podobným názvem již existuje.")
        return name
Example #33
import logging

from six import PY3

from django.utils.encoding import force_text

from autoslug.settings import slugify

from fobi.constants import (
    SUBMIT_VALUE_AS_VAL, SUBMIT_VALUE_AS_REPR, SUBMIT_VALUE_AS_MIX
)
from fobi.exceptions import ImproperlyConfigured

logger = logging.getLogger(__name__)

# *****************************************************************************
# *****************************************************************************
# ********************************** General **********************************
# *****************************************************************************
# *****************************************************************************

do_slugify = lambda s: slugify(s.lower()).lower()

def safe_text(text):
    """
    Safe text (encode).

    :return str:
    """
    if PY3:
        return force_text(text, encoding='utf-8')
    else:
        return force_text(text, encoding='utf-8').encode('utf-8')

def lists_overlap(sub, main):
    for i in sub:
        if i in main:
Example #34
def slugify_attr_name(name):
    return slugify(name.replace('_', '-')).replace('-', '_')
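Illustrative calls for slugify_attr_name (not from the source, assuming the configured slugify lowercases as Django's default does); the underscore/hyphen swap lets attribute-style names survive the round trip through slugify:

slugify_attr_name("Visible Name")   # -> "visible_name"
slugify_attr_name("field_1 (old)")  # -> "field_1_old"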
Example #35
    def parse_row(self, row):
        existing = False

        pk = "name"                 
        try:
            pk_val = row.getElementsByTagName(pk)[0]
        except Exception as error:
            raise IndexError("%s %s" % (pk, error))

        try:
            pk_text = pk_val.childNodes[0].nodeValue
        except Exception as error:
            raise Exception("%s %s: Error Reading 'text' from 'pk': %s" % (pk, pk_val, error)) 

        slug = slugify(pk_text)
        
        try:
            neighborhood = self.get_model_class().objects.get(slug=slug)
            existing = True
        except ObjectDoesNotExist:
            neighborhood = self.get_model_class()(slug=slug)
            existing = False
        except MultipleObjectsReturned:
            raise ImportException("multiple objects returned with %s %s " % (pk, pk_val))

        neighborhood.name = pk_text
        
        
        # default to an empty description so the parsing below cannot fail
        # with a NameError when the <description> element is missing
        description = ""
        try:
            description = row.getElementsByTagName("description")[0].childNodes[0].nodeValue
        except Exception as error:
            pass

        # strip the HTML table markup around the ZIP codes to get a plain long name
        s2 = description.replace("\n", "")
        s3 = s2[s2.find("ZIP"):]
        s4 = s3[:s3.find("</table>")]
        s5 = s4.replace('<td>', "").replace('</td>', '').replace("ZIP", '').replace("</tr>", '').replace("<tr>", '')
        neighborhood.long_name = s5

        try:
            ring = row.getElementsByTagName("MultiGeometry")[0]
        except Exception as error:
            raise IndexError("%s %s: Error Reading 'MultiGeometry' from 'Placemark': %s" % (pk, pk_val, error)) 

        try:
            coordinates = ring.getElementsByTagName("coordinates")[0]
        except Exception as error:
            raise Exception("%s %s: Error Reading 'coordinates' from 'MultiGeometry': %s" % (pk, pk_val, error))
        
        try:
            coord_text = coordinates.childNodes[0].nodeValue
        except Exception as error:
            raise Exception("%s %s: Error Reading 'text' from 'coordinates': %s" % (pk, pk_val, error)) 
        
        poly = coord_text.split(' ')
        
        point_str = ""
        for point in poly:
            if point: 
                try:
                    longitude, latitude, other = point.split(',')
                except Exception as error:
                    raise Exception("%s %s: Error splitting 'text': %s" % (pk, pk_val, error))
                point_str += "%s %s," % (longitude, latitude)

        # strip the trailing comma before building the polygon
        p = GEOSGeometry('POLYGON((%s))' % point_str.strip()[:-1])
        neighborhood.area = p

        if existing:
            self.stats['existing'] += 1
        else:
            self.stats['new'] += 1
        
        return neighborhood
Example #36
    def forwards(self, orm):
        for event in orm.Event.objects.all():
            event.slug = slugify(event.short_link)
            event.save()
Example #37
def index(
    request,
    general_subjects=None,
    grade_levels=None,
    course_material_types=None,
    library_material_types=None,
    collection=None,
    keywords=None,
    license=None,
    course_or_module=None,
    community_types=None,
    community_topics=None,
    microsite=None,
    model=None,
    search=False,
    tags=None,
    subjects=None,
    format=None,
    topics=None,
    alignment=None,
    facet_fields=None,
):

    if not facet_fields:
        facet_fields = [
            "general_subjects",
            "grade_levels",
            "keywords",
            "course_material_types",
            "media_formats",
            "cou_bucket",
            "indexed_topics",
        ]
    if model:
        index_namespace = model.namespace
    else:
        index_namespace = None

    if tags or subjects:
        # Tags and subjects are old path filters which are combined to
        # keywords filter now.

        # Redirect to keyword index.
        keywords = tags or subjects
        if index_namespace:
            url = reverse("materials:%s:keyword_index" % index_namespace, kwargs=dict(keywords=keywords))
        else:
            url = reverse("materials:keyword_index", kwargs=dict(keywords=keywords))
        return HttpResponsePermanentRedirect(url)

    if keywords:
        slugified_keywords = slugify(keywords)
        if not slugified_keywords:
            raise Http404()
        if slugified_keywords != keywords:
            # Keywords should be slugified.
            # Redirect to keyword index with slugified keyword.
            if index_namespace:
                url = reverse("materials:%s:keyword_index" % index_namespace, kwargs=dict(keywords=slugified_keywords))
            else:
                url = reverse("materials:keyword_index", kwargs=dict(keywords=slugified_keywords))
            return HttpResponsePermanentRedirect(url)

    query_string_params = {}
    filter_values = {}
    page_title = u"Browse"
    page_subtitle = u""
    breadcrumbs = [{"url": reverse("materials:browse"), "title": u"OER Materials"}]

    if not format:
        format = "html"
        if request.REQUEST.get("feed", None) == "yes":
            format = "rss"
        elif request.REQUEST.get("csv", None) == "yes":
            if not request.user.is_authenticated() or not request.user.is_staff:
                raise Http404()
            format = "csv"

    query = SearchQuerySet().narrow("is_displayed:true")

    if model:
        query = query.models(model)

    path_filter = None

    hidden_filters = {}

    for filter_name in PATH_FILTERS:
        value = locals()[filter_name]
        if value is not None:
            filter = FILTERS[filter_name]
            query = filter.update_query(query, value)
            path_filter = filter_name
            if page_subtitle:
                page_subtitle = u"%s &rarr; %s" % (page_subtitle, filter.page_subtitle(value))
            else:
                page_subtitle = filter.page_subtitle(value)
            filter_values[filter_name] = value

    visible_filters = [
        "search",
        "general_subjects",
        "grade_levels",
        "course_material_types",
        "media_formats",
        "cou_bucket",
    ]

    if microsite:
        microsite = Microsite.objects.get(slug=microsite)
        visible_filters.append("topics")

    search_query = u""

    for filter_name, filter in FILTERS.items():
        if filter_name == path_filter:
            continue
        value = filter.extract_value(request)
        if value is not None:
            query = filter.update_query(query, value)
            query_string_params = filter.update_query_string_params(query_string_params, value)
            filter_values[filter_name] = value
            if filter_name not in visible_filters:
                hidden_filters[filter.request_name] = value
            if filter_name == "search":
                search_query = value

    if search:
        if not search_query:
            if filter_values:
                return HttpResponsePermanentRedirect(
                    reverse("materials:index") + serialize_query_string_params(query_string_params)
                )
            else:
                messages.warning(request, u"You should specify the search term")
                return HttpResponsePermanentRedirect(reverse("materials:advanced_search"))

        page_title = u"Search Results"
        page_subtitle = search_query
        breadcrumbs = [{"url": reverse("materials:search"), "title": page_title}]

    elif model == CommunityItem:
        breadcrumbs = [{"url": reverse("materials:community"), "title": u"OER Community"}]

    if microsite:
        breadcrumbs = [
            {
                "url": reverse("materials:microsite", kwargs=dict(microsite=microsite.slug)),
                "title": u"%s Home" % microsite.name,
            }
        ]

    if not page_subtitle and model:
        page_subtitle = u"Content Type: %s" % model._meta.verbose_name_plural
    elif not page_subtitle and filter_values:
        filter_name = filter_values.keys()[0]
        filter = FILTERS[filter_name]
        page_subtitle = filter.page_subtitle(filter_values[filter_name])

    index_params = IndexParams(request, format, search_query)
    query_string_params = index_params.update_query_string_params(query_string_params)

    index_url = request.path + serialize_query_string_params(query_string_params, ignore_params=["batch_start"])
    if page_subtitle:
        index_title = u"%s: %s" % (page_title, page_subtitle)
    else:
        index_title = page_title

    feed_url = request.path + serialize_query_string_params(
        dict(query_string_params.items() + [("feed", "yes")]), ignore_params=["batch_start"]
    )
    csv_url = request.path + serialize_query_string_params(
        dict(query_string_params.items() + [("csv", "yes")]), ignore_params=["batch_start"]
    )

    batch_end = index_params.batch_start + index_params.batch_size

    if len(filter_values) == 1 and "featured" in filter_values:
        query = query.order_by("-featured_on")
    elif len(filter_values) == 1 and "evaluated_rubrics" in filter_values:
        query = query.order_by("-evaluation_score_rubric_%i" % filter_values["evaluated_rubrics"][0])
    elif index_params.query_order_by is not None:
        query = query.order_by(index_params.query_order_by)

    if index_params.sort_by == "visits" and not filter_values:
        query = query.narrow("visits:[1 TO *]")

    items = []

    if format == "html":

        for facet_field in facet_fields:
            query = query.facet(facet_field)

        total_items = len(query)

        if total_items and index_params.batch_start >= total_items:
            return HttpResponsePermanentRedirect(index_url)

        results = query[index_params.batch_start : batch_end]
        for result in results:
            if result is None:
                continue
            items.append(populate_item_from_search_result(result))

        pagination = Pagination(
            request.path, query_string_params, index_params.batch_start, index_params.batch_size, total_items
        )

        facets = query.facet_counts().get("fields", {})

        index_filters = build_index_filters(visible_filters, facets, filter_values, path_filter, microsite)

        all_keywords = query.count() and facets.get("keywords", []) or []
        if len(all_keywords) > MAX_TOP_KEYWORDS:
            top_keywords = get_tag_cloud(dict(all_keywords[:MAX_TOP_KEYWORDS]), 3, 0, 0)
            all_keywords = get_tag_cloud(dict(all_keywords), 3, 0, 0)
        else:
            top_keywords = get_tag_cloud(dict(all_keywords), 3, 0, 0)
            all_keywords = []

        for keyword in top_keywords:
            name = (
                get_name_from_slug(Keyword, keyword["slug"])
                or get_name_from_slug(Tag, keyword["slug"])
                or keyword["slug"]
            )
            keyword["name"] = name
        for keyword in all_keywords:
            name = (
                get_name_from_slug(Keyword, keyword["slug"])
                or get_name_from_slug(Tag, keyword["slug"])
                or keyword["slug"]
            )
            keyword["name"] = name

        if request.is_ajax():
            output = render_to_string("materials/include/index-items.html", RequestContext(request, locals()))
            data = dict(
                items=output,
                first_item_number=pagination.first_item_number,
                last_item_number=pagination.last_item_number,
                total_items=pagination.total_items,
                page_title=unicode(page_title),
                page_subtitle=page_subtitle and unicode(page_subtitle or u""),
            )
            return JsonResponse(data)
        return direct_to_template(request, "materials/index.html", locals())

    elif format == "rss":
        results = query[0:20]
        for result in results:
            if result is None:
                continue
            item = result.get_stored_fields()
            if item.get("general_subjects"):
                item["general_subjects"] = [get_name_from_id(GeneralSubject, id) for id in item["general_subjects"]]

            namespace = getattr(result.model, "namespace", None)
            if namespace:
                item["get_absolute_url"] = reverse("materials:%s:view_item" % namespace, kwargs=dict(slug=item["slug"]))
            else:
                item["get_absolute_url"] = result.object.get_absolute_url()

            item["model_verbose_name"] = result.model._meta.verbose_name_plural

            items.append(item)

        return direct_to_template(request, "materials/index-rss.xml", locals(), "text/xml")

    elif format == "json":
        results = query[index_params.batch_start : batch_end]

        for result in results:
            if result is None:
                continue
            data = result.get_stored_fields()
            item = {
                "id": result.id,
                "title": data["title"],
                "abstract": data["abstract"],
                "url": data["url"],
                "keywords": data["keywords_names"],
                "subject": [get_slug_from_id(GeneralSubject, id) for id in (data["general_subjects"] or [])],
                "grade_level": [get_slug_from_id(GradeLevel, id) for id in (data["grade_levels"] or [])],
                "collection": data["collection"] and get_name_from_id(Collection, data["collection"]) or None,
            }
            items.append(item)

        return JsonResponse(items)

    elif format == "xml":
        query = query.load_all()
        results = query[index_params.batch_start : batch_end]

        for result in results:
            if result is None:
                continue
            object = result.object
            data = result.get_stored_fields()
            item = {"url": data["url"], "title": data["title"]}
            if data.get("authors"):
                item["author"] = data["authors"][0]
            if data.get("institution"):
                item["institution"] = get_name_from_id(Institution, data["institution"])
            item["abstract"] = data["abstract"]

            license = object.license
            item["copyright_holder"] = license.copyright_holder
            item["license_url"] = license.url
            item["license_name"] = license.name
            item["license_description"] = license.description
            item["license_type"] = license.type
            item["cou_bucket"] = license.bucket

            if data["rating"]:
                item["rating"] = "%.1f" % data["rating"]

            item["fields"] = []
            grade_levels = data.get("grade_levels")
            if grade_levels:
                item["fields"].append(
                    dict(
                        title=u"Grade Level",
                        param=FILTERS["grade_levels"].request_name,
                        value=u",".join([get_slug_from_id(GradeLevel, id) for id in grade_levels]),
                        content=u",".join([get_name_from_id(GradeLevel, id) for id in grade_levels]),
                    )
                )
            general_subjects = data.get("general_subjects")
            if general_subjects:
                item["fields"].append(
                    dict(
                        title=u"Subject",
                        param=FILTERS["general_subjects"].request_name,
                        value=u",".join([get_slug_from_id(GeneralSubject, id) for id in general_subjects]),
                        content=u",".join([get_name_from_id(GeneralSubject, id) for id in general_subjects]),
                    )
                )
            collection = data.get("collection")
            if collection:
                item["fields"].append(
                    dict(
                        title=u"Collection",
                        param=FILTERS["collection"].request_name,
                        value=get_slug_from_id(Collection, collection),
                        content=get_name_from_id(Collection, collection),
                    )
                )
            geographic_relevance = data.get("geographic_relevance")
            if geographic_relevance:
                item["fields"].append(
                    dict(
                        title=u"Geographic Regional Relevance",
                        param=FILTERS["geographic_relevance"].request_name,
                        value=u",".join([get_slug_from_id(GeographicRelevance, id) for id in geographic_relevance]),
                        content=u",".join([get_name_from_id(GeographicRelevance, id) for id in geographic_relevance]),
                    )
                )

            keywords = object.keywords.values("slug", "name")
            if keywords:
                item["fields"].append(
                    dict(
                        title=u"Keywords",
                        param=FILTERS["keywords"].request_name,
                        value=u",".join([k["slug"] for k in keywords]),
                        content=u",".join([k["name"] for k in keywords]),
                    )
                )

            tags = object.tags.values("slug", "name").order_by("slug").distinct()
            if tags:
                item["fields"].append(
                    dict(
                        title=u"Tags",
                        param=FILTERS["keywords"].request_name,
                        value=u",".join([k["slug"] for k in tags]),
                        content=u",".join([k["name"] for k in tags]),
                    )
                )

            items.append(item)

        return direct_to_template(request, "materials/index-xml.xml", locals(), "text/xml")

    elif format == "csv":
        return csv_export(query, index_title)
Example #38
def auto_fill_slug(apps, schema_editor):
    Word = apps.get_model('dico', 'Word')
    for word in Word.objects.all():
        word.slug = slugify(word.label)
        word.save()
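For reference, a sketch of how a forwards function like this is typically registered in a data migration; the dependency below is hypothetical:

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [('dico', '0002_word_slug')]  # hypothetical previous migration
    operations = [
        migrations.RunPython(auto_fill_slug, migrations.RunPython.noop),
    ]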
Example #39
def get_path(instance, filename):
    ext = filename.split('.')[-1]
    return os.path.normcase(
        os.path.join('uploads',
                     slugify(instance.name[:60]) + '.' + ext))
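A minimal sketch (hypothetical model) of how get_path is wired up as an upload_to callable, which is what makes Django pass (instance, filename) to it:

from django.db import models

class Asset(models.Model):
    name = models.CharField(max_length=255)
    # Django calls get_path(instance, filename) and stores e.g. "uploads/my-asset.png"
    file = models.FileField(upload_to=get_path)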
Example #40
    def save(self, *args, **kwargs):
        self.preset_slug = slugify(self.name)
        super(Preset, self).save(*args, **kwargs)
Example #41
def do_slugify(val):
    """Slugify."""
    return slugify(val.lower()).lower()
Example #42
def redo_slugs(apps, schema_editor):
    """Regenerate slugs for MuckRock entitlements"""
    for entitlement in Entitlement.objects.all():
        entitlement.slug = slugify(entitlement.name)
        entitlement.save()
Example #43
def run():
    cursor = connections["old"].cursor()

    # Delete existing profiles
    print "Removing existing profiles..."
    Profile.objects.all().delete()

    # Delete existing users
    print "Removing existing users..."
    User.objects.exclude(pk=1).delete()

    cursor.execute("SELECT id, login, encrypted_password, title, description, email, "
                   "homepage, institution, institution_url, grade_levels, department, "
                   "specializations, state, biography, why_interested, publish_portfolio, "
                   "publish_profile, role FROM _principals")

    total_items = cursor.rowcount

    print "Creating users..."
    cnt = 0
    for id, login, encrypted_password, title, description, email, \
        homepage, institution, institution_url, grade_levels, department, \
        specializations, state, biography, why_interested, publish_portfolio, \
        publish_profile, role in cursor.fetchall():

        cnt += 1

        if role is None:
            role = u""
        if specializations is None:
            specializations = []
        if homepage is None:
            homepage = u""
        if institution is None:
            institution = u""
        if institution_url is None:
            institution_url = u""
        if department is None:
            department = u""
        if state is None:
            state = u""
        if biography is None:
            biography = u""
        if why_interested is None:
            why_interested = u""
        if publish_portfolio is None:
            publish_portfolio = False
        if publish_profile is None:
            publish_profile = False
        if grade_levels is None:
            grade_levels = []

        principal_id = "oer.member.%i" % id
        try:
            first_name, last_name = title.split(None, 1)
        except ValueError:
            first_name = title
            last_name = u""


        if len(first_name) > 30:
            first_name = first_name[:30]
        if len(last_name) > 30:
            last_name = last_name[:30]

        password = BCRYPT_PREFIX + encrypted_password
        user = User(username=login,
                    password=password,
                    email=email,
                    first_name=first_name,
                    last_name=last_name
                    )
        user.save()

        profile = Profile(user=user,
                          principal_id=principal_id,
                          homepage=force_unicode(homepage),
                          institution=force_unicode(institution),
                          institution_url=force_unicode(institution_url),
                          department=force_unicode(department),
                          specializations=u"\n".join(force_unicode(s) for s in specializations),
                          state=force_unicode(state),
                          biography=force_unicode(biography),
                          why_interested=force_unicode(why_interested),
                          publish_portfolio=publish_portfolio,
                          publish_profile=publish_profile,
                          role=force_unicode(role))

        try:
            profile.save()
        except DatabaseError:
            import pprint
            pprint.pprint(locals())
            raise

        for l in grade_levels:
            l = slugify(l)
            try:
                grade_level = GradeLevel.objects.get(slug=l)
            except GradeLevel.DoesNotExist:
                print l
                # skip unknown grade levels instead of re-adding the previous one
                continue
            profile.grade_level.add(grade_level)

        if cnt % 100 == 0:
            print "%i of %i" % (cnt, total_items)

    print "Done!"
Example #44
    def forwards(self, orm):
        for profile in orm.UserProfile.objects.all():
            profile.slug = slugify(profile.name)
            profile.save()
Example #45
def auto_fill_slug(apps, schema_editor):
    Word = apps.get_model("dico", "Word")
    for word in Word.objects.all():
        word.slug = slugify(word.label)
        word.save()
Example #46
def run_job(job):
    job.status = RUNNING
    job.save()

    job.harvested_records = 0
    job.processed_records = 0

    f = NamedTemporaryFile()

    try:

        metadata_prefix = job.metadata_prefix.prefix
        format = METADATA_FORMATS[metadata_prefix]
        client = job.repository.client
        client.ignoreBadCharacters(True)
        client.updateGranularity()

        kwargs = {'metadataPrefix': metadata_prefix}
        _time = datetime.time(0, 0, 0)
        if job.from_date:
            kwargs['from_'] = datetime.datetime.combine(job.from_date, _time)
        if job.until_date:
            kwargs['until'] = datetime.datetime.combine(job.until_date, _time)

        if job.set:
            kwargs["set"] = job.set.spec

        writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)

        writer.writerow(format.header)

        for header, metadata, about in client.listRecords(**kwargs):
            if header.isDeleted():
                continue
            identifier = str(header.identifier())
            try:
                writer.writerow(format.process_record(identifier, metadata))
                job.harvested_records += 1
            except:
                text = u"Error while processing record identifier: %s\n%s" % (identifier,
                                                                              traceback.format_exc())
                Error(text=text, job=job).save()
            job.processed_records += 1

            # Save the job after every 100 processed records
            if not job.processed_records % 100:
                job.save()

        if job.errors.count():
            job.status = ERROR
        else:
            job.status = COMPLETE

        now = datetime.datetime.now()

        if job.set:
            job.set.harvested_on = now
            job.set.save()

        job.finished_on = now

        filename_parts = [slugify(unicode(job.repository))]
        if job.set:
            filename_parts.append(slugify(job.set.spec))
        filename_parts.append(now.isoformat())
        filename = "-".join(filename_parts) + ".csv"
        job.file.save(filename, File(f))

    except NoRecordsMatchError:
        job.status = NO_RECORDS_MATCH

    except:
        job.status = ERROR
        Error(text=traceback.format_exc(), job=job).save()

    finally:
        job.save()
        f.close()

    job.notify()
Example #47
    def save(self, *args, **kwargs):
        if not self.slug:
            self.slug = slugify(self.title)
        super(Tag, self).save(*args, **kwargs)
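Minimal model context assumed for the save() override above (only title and slug appear in the source); the guard keeps an existing slug stable on later saves:

from django.db import models

class Tag(models.Model):
    title = models.CharField(max_length=100)
    # left blank on creation; filled in once by save() and never overwritten
    slug = models.SlugField(max_length=100, blank=True)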
Example #48
def slugify_workspace(val):
    """Slugify workspace."""
    return slugify(val.lower()).lower()
Example #49
__author__ = 'Artur Barseghyan <*****@*****.**>'
__copyright__ = 'Copyright (c) 2013 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = (
    'slugify_workspace', 'lists_overlap', 'iterable_to_dict',
    'uniquify_sequence', 'clean_plugin_data', 'clone_plugin_data',
    'update_plugin_data', 'safe_text',
    )

from six import PY3

from django.utils.encoding import force_text

from autoslug.settings import slugify

slugify_workspace = lambda s: slugify(s.lower()).lower()

def safe_text(text):
    """
    Safe text (encode).

    :return str:
    """
    if PY3:
        return force_text(text, encoding='utf-8')
    else:
        return force_text(text, encoding='utf-8').encode('utf-8')

def lists_overlap(sub, main):
    for i in sub:
        if i in main:
Example #50
def do_slugify(s):
    """Slugify."""
    return slugify(s.lower()).lower()
Example #51
def slash_slugify(value):
    slug_parts = []
    for part in value.split('/'):
        slug_parts.append(slugify(part))
    return '/'.join(slug_parts)
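Illustrative call (assuming a lowercasing slugify backend): each path segment is slugified independently, so the slashes themselves are preserved:

slash_slugify("Docs/Getting Started/Install")  # -> "docs/getting-started/install"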