Example #1
def moveCoverToBlobstore(album):
    if not album.small_filetype:
        return

    from slughifi import slughifi
    fn = "%s_%s" % (slughifi(album.artist), slughifi(album.title))
    small_file = files.blobstore.create(
        mime_type=album.small_filetype,
        _blobinfo_uploaded_filename="%s_small.png" % fn)
    large_file = files.blobstore.create(
        mime_type=album.large_filetype,
        _blobinfo_uploaded_filename="%s_big.png" % fn)

    with files.open(small_file, 'a') as small:
        small.write(album.small_cover)
    with files.open(large_file, 'a') as large:
        large.write(album.large_cover)

    files.finalize(small_file)
    files.finalize(large_file)

    album.cover_small = files.blobstore.get_blob_key(small_file)
    album.cover_large = files.blobstore.get_blob_key(large_file)

    del album.small_cover
    del album.large_cover
    del album.large_filetype
    del album.small_filetype

    album.put()
Example #2
    def save(self, *args, **kwargs):
        from slughifi import slughifi
        from catalogue.utils import ExistingFile, remove_zip

        try:
            old = BookMedia.objects.get(pk=self.pk)
        except BookMedia.DoesNotExist:
            old = None
        else:
            # if name changed, change the file name, too
            if slughifi(self.name) != slughifi(old.name):
                self.file.save(None, ExistingFile(self.file.path), save=False, leave=True)

        super(BookMedia, self).save(*args, **kwargs)

        # remove the zip package for book with modified media
        if old:
            remove_zip("%s_%s" % (old.book.slug, old.type))
        remove_zip("%s_%s" % (self.book.slug, self.type))

        extra_info = self.extra_info
        extra_info.update(self.read_meta())
        self.extra_info = extra_info
        self.source_sha1 = self.read_source_sha1(self.file.path, self.type)
        return super(BookMedia, self).save(*args, **kwargs)
Example #3
 def save(self, *args, **kwargs):
     
     for lang in settings.LANGUAGES:
         label = getattr(self, 'label_' + lang[0])
         label = "/" + slughifi(label) + "/"
         
         try:
             CustomFlatPage.objects.get(url__iexact=label)
             label_parent = getattr(self.parent, 'label_' + lang[0])
             label +=  slughifi(label_parent) + "/"
             setattr(self, 'url_' + lang[0], label)
         except CustomFlatPage.DoesNotExist:
             setattr(self, 'url_' + lang[0], label)
     
     # Save current Menu.
     self.caption = self.get_caption(with_level = True)
     super(Menu, self).save(*args, **kwargs) # Call the "real" save() method.
     
     # Update dependant flatpages' links
     for fpl in self.flatpage_links.all():
         url = getattr(self, 'url_' + fpl.language)
         fpl.url = url
         fpl.save()
         
     # Update descendants
     self.update_descendants()
Example #4
def moveCoverToBlobstore(album):
  if not album.small_filetype:
    return

  from slughifi import slughifi
  fn = "%s_%s"%(slughifi(album.artist), slughifi(album.title))
  small_file = files.blobstore.create(mime_type=album.small_filetype,
                                      _blobinfo_uploaded_filename="%s_small.png"%fn)
  large_file = files.blobstore.create(mime_type=album.large_filetype,
                                      _blobinfo_uploaded_filename="%s_big.png"%fn)

  with files.open(small_file, 'a') as small:
    small.write(album.small_cover)
  with files.open(large_file, 'a') as large:
    large.write(album.large_cover)

  files.finalize(small_file)
  files.finalize(large_file)

  album.cover_small = files.blobstore.get_blob_key(small_file)
  album.cover_large = files.blobstore.get_blob_key(large_file)

  del album.small_cover
  del album.large_cover
  del album.large_filetype
  del album.small_filetype

  album.put()
Example #5
 def get_dynamic_path(media, filename, ext=ext):
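     # note: this variant appears to be defined inside an enclosing factory
     # function, so the default ext and the maxlen used below come from that scope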
     # how to put related book's slug here?
     if not ext:
         if media.type == 'daisy':
             ext = 'daisy.zip'
         else:
             ext = media.type
     if not media.name:
         name = slughifi(filename.split(".")[0])
     else:
         name = slughifi(media.name)
     return 'lektura/%s.%s' % (name[:maxlen-len('lektura/.%s' % ext)-4], ext)
Example #6
def get_dynamic_path(media, filename, ext=None, maxlen=100):
    from slughifi import slughifi

    # how to put related book's slug here?
    if not ext:
        # BookMedia case
        ext = media.formats[media.type].ext
    if media is None or not media.name:
        name = slughifi(filename.split(".")[0])
    else:
        name = slughifi(media.name)
    return 'book/%s/%s.%s' % (ext, name[:maxlen-len('book/%s/.%s' % (ext, ext))-4], ext)
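A quick usage sketch for the variant above (hypothetical file name; assumes the slughifi package is installed and get_dynamic_path is in scope):

# ext is passed explicitly here, so no media object is needed
print get_dynamic_path(None, u"Pan Tadeusz ks. 1.mp3", ext='mp3')
# prints something like: book/mp3/pan-tadeusz-ks.mp3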
Example #7
def migrar_sitios(request):
	cursor = conectar()
	cursor.execute("SELECT * FROM sitios")
	for row in cursor.fetchall():
		username = sanizar_user(row[13])
		s = Sitio(id=row[0],
				  nombre=row[1],
				  slug=row[2],
				  direccion=row[3],
				  zona=row[4],
				  ciudad=4,
				  lat=row[6],
				  lng=row[7],
				  web=row[5],
				  rank=row[8],
				  num_votos=row[11],
				  user=User.objects.get(username=username),
				  fecha=datetime.datetime.now(),
				  ip='127.0.0.1'
				  )
		s.save()
		row_tipo = row[12]
		if row_tipo == 'Tapas':
			row_tipo = 'Bar'
		tipo, created = Tipo.objects.get_or_create(tipo=row_tipo, slug=slughifi.slughifi(row_tipo))
		s.tipo.add(tipo)

	t = get_template('migrar.html')
	html = t.render(Context({
		'fin': 'FIN migrar_sitios',
		}))
	return HttpResponse(html)
Example #8
def get_cookin(model, links):
    saved = []
    for link in links:
        full_url = '%s/%s' % (base_url, link)
        recipe = requests.get(full_url)
        if recipe.status_code == 200:
            soup = BeautifulSoup(md.markdown(recipe.content))
            name = soup.find('h1')
            if name:
                name = name.text
            else:
                name = ' '.join(path.basename(urlparse(full_url).path).split('_')).replace('.md', '').title()
            ingredient = db.session.query(model).get(full_url)
            ingredient_data = {
                'url': full_url,
                'name': name,
                'slug': slughifi(name),
                'recipe': recipe.content.decode('utf-8'),
            }
            if not ingredient:
                ingredient = model(**ingredient_data)
                db.session.add(ingredient)
                db.session.commit()
            else:
                for k,v in ingredient_data.items():
                    setattr(ingredient, k, v)
                db.session.add(ingredient)
                db.session.commit()
            saved.append(ingredient)
        else:
            ingredient = model.query.get(full_url)
            if ingredient:
                db.session.delete(ingredient)
                db.session.commit()
    return saved
Example #9
def save_writers(film, writers):
    for writer in writers:
        name_surname = writer.get('name')
        writer_link = slughifi.slughifi(name_surname)
        writer_imdb = writer.personID
        writer_name, writer_surname = save_name_surname(name_surname)
        dbdir = Person.objects.filter(permalink=writer_link)
        if dbdir.count() == 0:
            person = Person(
                name=writer_name,
                surname=writer_surname,
                is_writer=True,
                status=1,
                version=1,
                type=2,
                permalink=writer_link,
                imdb_code=writer_imdb,
                actor_popularity=0,
                director_popularity=0,
                actor_popularity_month=0,
                director_popularity_month=0,
                writer_popularity=0,
                writer_popularity_month=0,
            )
            person.save(saved_by=2)
            film.writers.add(person)
            logger.debug("Saving Writer. ID=" + unicode(person))
        else:
            person = dbdir[0]
            person.is_writer = True
            person.save(saved_by=2)
            film.writers.add(person)
            logger.debug("Saving Writer. ID=" + unicode(person))
Example #10
    def import_xml_text(cls, text=u'', previous_book=None,
                commit_args=None, **kwargs):
        """Imports a book from XML, splitting it into chunks as necessary."""
        texts = split_xml(text)
        if previous_book:
            instance = previous_book
        else:
            instance = cls(**kwargs)
            instance.save()

        # if there are more parts, set the rest to empty strings
        book_len = len(instance)
        for i in range(book_len - len(texts)):
            texts.append((u'pusta część %d' % (i + 1), u''))  # "pusta część" = "empty part"

        i = 0
        for i, (title, text) in enumerate(texts):
            if not title:
                title = u'część %d' % (i + 1)

            slug = slughifi(title)

            if i < book_len:
                chunk = instance[i]
                chunk.slug = slug[:50]
                chunk.title = title[:255]
                chunk.save()
            else:
                chunk = instance.add(slug, title)

            chunk.commit(text, **commit_args)

        return instance
Example #11
 def tags_from_info(info):
     from slughifi import slughifi
     from sortify import sortify
     meta_tags = []
     categories = (('kinds', 'kind'), ('genres', 'genre'), ('authors', 'author'), ('epochs', 'epoch'))
     for field_name, category in categories:
         try:
             tag_names = getattr(info, field_name)
         except:
             try:
                 tag_names = [getattr(info, category)]
             except:
                 # For instance, Pictures do not have 'genre' field.
                 continue
         for tag_name in tag_names:
             tag_sort_key = tag_name
             if category == 'author':
                 tag_sort_key = tag_name.last_name
                 tag_name = tag_name.readable()
             tag, created = Tag.objects.get_or_create(slug=slughifi(tag_name), category=category)
             if created:
                 tag.name = tag_name
                 tag.sort_key = sortify(tag_sort_key.lower())
                 tag.save()
             meta_tags.append(tag)
     return meta_tags
Example #12
    def append(self, other, slugs=None, titles=None):
        """Add all chunks of another book to self."""
        assert self != other

        number = self[len(self) - 1].number + 1
        len_other = len(other)
        single = len_other == 1

        if slugs is not None:
            assert len(slugs) == len_other
        if titles is not None:
            assert len(titles) == len_other
            if slugs is None:
                slugs = [slughifi(t) for t in titles]

        for i, chunk in enumerate(other):
            # move chunk to new book
            chunk.book = self
            chunk.number = number

            if titles is None:
                # try some title guessing
                if other.title.startswith(self.title):
                    other_title_part = other.title[len(self.title):].lstrip(' /')
                else:
                    other_title_part = other.title

                if single:
                    # special treatment for appending one-parters:
                    # just use the guessed title and original book slug
                    chunk.title = other_title_part
                    if other.slug.startswith(self.slug):
                        chunk.slug = other.slug[len(self.slug):].lstrip('-_')
                    else:
                        chunk.slug = other.slug
                else:
                    chunk.title = ("%s, %s" % (other_title_part, chunk.title))[:255]
            else:
                chunk.slug = slugs[i]
                chunk.title = titles[i]

            chunk.slug = self.make_chunk_slug(chunk.slug)
            chunk.save()
            number += 1
        assert not other.chunk_set.exists()

        gm = GalleryMerger(self.gallery, other.gallery)
        self.gallery = gm.merge()

        # and move the gallery starts
        if gm.was_merged:
            for chunk in self[len(self) - len_other:]:
                old_start = chunk.gallery_start or 1
                chunk.gallery_start = old_start + gm.dest_size - gm.num_deleted
                chunk.save()

        other.delete()
Example #13
def info_args(title):
    """ generate some keywords for comfortable BookInfoCreation  """
    slug = unicode(slughifi(title))
    return {
        'title': unicode(title),
        'slug': slug,
        'url': u"http://wolnelektury.pl/example/%s" % slug,
        'about': u"http://wolnelektury.pl/example/URI/%s" % slug,
    }
Example #14
def download_shelf(request, slug):
    """"
    Create a ZIP archive on disk and transmit it in chunks of 8KB,
    without loading the whole file into memory. A similar approach can
    be used for large dynamic PDF files.
    """
    shelf = get_object_or_404(models.Tag, slug=slug, category='set')

    formats = []
    form = forms.DownloadFormatsForm(request.GET)
    if form.is_valid():
        formats = form.cleaned_data['formats']
    if len(formats) == 0:
        formats = ['pdf', 'epub', 'odt', 'txt']

    # Create a ZIP archive
    temp = tempfile.TemporaryFile()
    archive = zipfile.ZipFile(temp, 'w')

    already = set()
    for book in collect_books(models.Book.tagged.with_all(shelf)):
        if 'pdf' in formats and book.pdf_file:
            filename = book.pdf_file.path
            archive.write(filename, str('%s.pdf' % book.slug))
        if book.root_ancestor not in already and 'epub' in formats and book.root_ancestor.epub_file:
            filename = book.root_ancestor.epub_file.path
            archive.write(filename, str('%s.epub' % book.root_ancestor.slug))
            already.add(book.root_ancestor)
        if 'odt' in formats and book.has_media("odt"):
            for file in book.get_media("odt"):
                filename = file.file.path
                archive.write(filename, str('%s.odt' % slughifi(file.name)))
        if 'txt' in formats and book.txt_file:
            filename = book.txt_file.path
            archive.write(filename, str('%s.txt' % book.slug))
    archive.close()

    response = HttpResponse(content_type='application/zip', mimetype='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename=%s.zip' % slughifi(shelf.name)
    response['Content-Length'] = temp.tell()

    temp.seek(0)
    response.write(temp.read())
    return response
Example #15
    def build_html(self):
        from django.core.files.base import ContentFile
        from slughifi import slughifi
        from librarian import html

        meta_tags = list(self.tags.filter(
            category__in=('author', 'epoch', 'genre', 'kind')))
        book_tag = self.book_tag()

        html_output = self.wldocument(parse_dublincore=False).as_html()
        if html_output:
            self.html_file.save('%s.html' % self.slug,
                    ContentFile(html_output.get_string()))

            # get ancestor l-tags for adding to new fragments
            ancestor_tags = []
            p = self.parent
            while p:
                ancestor_tags.append(p.book_tag())
                p = p.parent

            # Delete old fragments and create them from scratch
            self.fragments.all().delete()
            # Extract fragments
            closed_fragments, open_fragments = html.extract_fragments(self.html_file.path)
            for fragment in closed_fragments.values():
                try:
                    theme_names = [s.strip() for s in fragment.themes.split(',')]
                except AttributeError:
                    continue
                themes = []
                for theme_name in theme_names:
                    if not theme_name:
                        continue
                    tag, created = Tag.objects.get_or_create(slug=slughifi(theme_name), category='theme')
                    if created:
                        tag.name = theme_name
                        tag.sort_key = theme_name.lower()
                        tag.save()
                    themes.append(tag)
                if not themes:
                    continue

                text = fragment.to_string()
                short_text = truncate_html_words(text, 15)
                if text == short_text:
                    short_text = ''
                new_fragment = Fragment.objects.create(anchor=fragment.id, book=self,
                    text=text, short_text=short_text)

                new_fragment.save()
                new_fragment.tags = set(meta_tags + themes + [book_tag] + ancestor_tags)
            self.save()
            self.html_built.send(sender=self)
            return True
        return False
Example #16
def info_args(title, language=None):
    """ generate some keywords for comfortable BookInfoCreation  """
    slug = unicode(slughifi(title))
    if language is None:
        language = u'pol'
    return {
        'title': unicode(title),
        'url': WLURI.from_slug(slug),
        'about': u"http://wolnelektury.pl/example/URI/%s" % slug,
        'language': language,
    }
Example #17
def migrar_tags(request):
	cursor = conectar()
	cursor.execute("SELECT * FROM freetags")
	for row in cursor.fetchall():
		print row
		try:
			t = Etiqueta(id=row[0],
						 tag=slughifi.slughifi(row[1])
						)
			t.save()
		except BaseException, e:
			print e
Example #18
def make_headers(worksheet):
    """Make headers"""
    headers = {}
    cell_idx = 0
    while cell_idx < worksheet.ncols:
        cell_type = worksheet.cell_type(0, cell_idx)
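        # cell type 1 is xlrd.XL_CELL_TEXT, so only text cells become headers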
        if cell_type == 1:
            header = slughifi(worksheet.cell_value(0, cell_idx))
            if not header.startswith("_"):
                headers[cell_idx] = header
        cell_idx += 1
    return headers
Example #19
def process_xlsx(content):
    """Turn Excel file contents into Tarbell worksheet data"""
    data = {}
    workbook = xlrd.open_workbook(file_contents=content)
    worksheets = workbook.sheet_names()
    for worksheet_name in worksheets:
        worksheet = workbook.sheet_by_name(worksheet_name)
        worksheet.name = slughifi(worksheet.name)
        headers = make_headers(worksheet)
        worksheet_data = make_worksheet_data(headers, worksheet)
        data[worksheet.name] = worksheet_data
    return data
Example #21
def get_file_path(filename):
    """ Function create the name of the file and return filenamepath """
    ext = filename.split('.')[-1]
    filename_split = filename.rsplit('.', 1)
    file_name = filename_split[0]
    file_name = slughifi(file_name)
    d = date.today()
    file_year = str(d.year)
    file_month = str(d.month).zfill(2)
    file_date = str(int(datetime.datetime.now().strftime("%s")) * 1000)
    filenamepath = "%s_%s.%s" % (file_name, file_date, ext)
    return filenamepath
Example #23
 def save_model(self, request, obj, form, change):
     try:
         slug = slughifi(obj.title)
         Post.objects.get(slug=slug)
         if not change:
             return HttpResponse('Post slug and title already exists')
         else: 
             return HttpResponse(obj.id)
     except Post.DoesNotExist:
         obj.slug = slug
         obj.save()
         return obj
Example #24
 def __slugify(self, model_instance):
     "Slughifi is courtesy of Samuel Adam - [email protected]"
     slug_sources = [getattr(model_instance, field) for field in self.populate_from]
     slug = slughifi.slughifi((' ').join(slug_sources))
     suffix_idx = 1
     # check for uniqueness
     lookup_kwargs = {
         self.attname: slug
     }
     exclude_kwargs = {}
     if self.unique_for:
         for unique_column in self.unique_for:
             lookup_kwargs[unique_column] = getattr(model_instance, unique_column)
     if model_instance.pk:
         exclude_kwargs['pk'] = model_instance.pk
     while model_instance.__class__.objects.exclude(**exclude_kwargs).filter(**lookup_kwargs).count() > 0:
         suffix_idx += 1
         trim = self.max_length - (len(str(suffix_idx)) + 1)
         slug = slughifi.slughifi((' ').join(slug_sources)) + '-%d' % suffix_idx
         slug = slug[0:trim]
         lookup_kwargs[self.attname] = slug
     return slug
Example #25
 def create_user(self, request, access, token, user_data):
     try:
         name = user_data['name']
         slug_name = slughifi(name)
         try:
             user = User.objects.get(username=slug_name) #@UnusedVariable
         except:
             pass  # if no such user exists, nothing to do
         else:
             raise  # if it exists, raise so a different username gets chosen
     except:
         slug_name = self.identifier_from_data(user_data)
     try:
         email = user_data['email']
     except:
         email = '*****@*****.**' % slug_name #TODO
     try:
         website = user_data['website']
     except:
         website = user_data['link']
     try:
         gender = user_data['gender']
         if gender=='hombre' or gender=='man':
             gender = 'H'
         elif gender=='mujer' or gender=='woman':
             gender = 'M'
         else:
             gender = 'I'
     except:
         gender = 'I'
     try:
         locale = user_data['locale'][0:2]
     except:
         locale = 'es'
     try:
         birthday = datetime.strptime(user_data['birthday'], '%m/%d/%Y')
     except:
         birthday = None
     user = User(username=slug_name, email=email)
     user.set_unusable_password()
     user.save()
     
     user_profile = create_user_profile(user, user.username, servicio='Facebook')
     user_profile.web = website
     user_profile.sexo = gender
     user_profile.idioma = locale
     user_profile.nacimiento = birthday
     user_profile.save()
     
     self.login_user(request, user)
     return user
Example #26
    def prepend_history(self, other):
        """Prepend history from all the other book's chunks to own."""
        assert self != other

        for i in range(len(self), len(other)):
            title = u"pusta część %d" % i
            chunk = self.add(slughifi(title), title)
            chunk.commit('')

        for i in range(len(other)):
            self[i].prepend_history(other[0])

        assert not other.chunk_set.exists()
        other.delete()
Example #27
def make_worksheet_data(headers, worksheet):
    # Make data
    data = []
    row_idx = 1
    while row_idx < worksheet.nrows:
        cell_idx = 0
        row_dict = {}
        while cell_idx < worksheet.ncols:
            cell_type = worksheet.cell_type(row_idx, cell_idx)
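            # xlrd cell types 1-4 are text, number, date and boolean;
            # empty (0), error and blank cells are skipped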
            if cell_type > 0 and cell_type < 5:
                cell_value = worksheet.cell_value(row_idx, cell_idx)
                try:
                    row_dict[headers[cell_idx]] = cell_value
                except KeyError:
                    try:
                        column = uppercase[cell_idx]
                    except IndexError:
                        column = cell_idx
                        puts("There is no header for cell with value '{0}' in column '{1}' of '{2}'" .format(
                            cell_value, column, worksheet.name
                        ))
            cell_idx += 1
        data.append(row_dict)
        row_idx += 1

    # Magic key handling
    if 'key' in headers.values():
        keyed_data = {}
        for row in data:
            if 'key' in row.keys():
                key = slughifi(row['key'])
                if keyed_data.get(key):
                    puts("There is already a key named '{0}' with value "
                           "'{1}' in '{2}'. It is being overwritten with "
                           "value '{3}'.".format(key,
                                   keyed_data.get(key),
                                   worksheet.name,
                                   row))

                # Magic values worksheet
                if worksheet.name == "values":
                    value = row.get('value')
                    if value:
                        keyed_data[key] = value
                else:
                    keyed_data[key] = row

        data = keyed_data

    return data
Example #28
def make_worksheet_data(headers, worksheet):
    # Make data
    data = []
    row_idx = 1
    while row_idx < worksheet.nrows:
        cell_idx = 0
        row_dict = {}
        while cell_idx < worksheet.ncols:
            cell_type = worksheet.cell_type(row_idx, cell_idx)
            if cell_type > 0 and cell_type < 5:
                cell_value = worksheet.cell_value(row_idx, cell_idx)
                try:
                    row_dict[headers[cell_idx]] = cell_value
                except KeyError:
                    try:
                        column = uppercase[cell_idx]
                    except IndexError:
                        column = cell_idx
                    puts(
                        "There is no header for cell with value '{0}' in column '{1}' of '{2}'"
                        .format(cell_value, column, worksheet.name))
            cell_idx += 1
        data.append(row_dict)
        row_idx += 1

    # Magic key handling
    if 'key' in headers.values():
        keyed_data = {}
        for row in data:
            if 'key' in row.keys():
                key = slughifi(row['key'])
                if keyed_data.get(key):
                    puts("There is already a key named '{0}' with value "
                         "'{1}' in '{2}'. It is being overwritten with "
                         "value '{3}'.".format(key, keyed_data.get(key),
                                               worksheet.name, row))

                # Magic values worksheet
                if worksheet.name == "values":
                    value = row.get('value')
                    if value:
                        keyed_data[key] = value
                else:
                    keyed_data[key] = row

        data = keyed_data

    return data
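For reference, a minimal sketch of what the "magic key" pass above produces, feeding plain row dicts instead of a real xlrd worksheet (data is illustrative):

from slughifi import slughifi

rows = [{'key': u'First key', 'value': 1}, {'key': u'Drugi klucz', 'value': 2}]
keyed_data = {}
for row in rows:
    keyed_data[slughifi(row['key'])] = row
print keyed_data  # keys come out as 'first-key' and 'drugi-klucz'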
Example #29
def save_writers(film, writers):
    for writer in writers:
        name_surname = writer.get('name')
        writer_link = slughifi.slughifi(name_surname)
        writer_imdb = writer.personID
        writer_name, writer_surname = save_name_surname(name_surname)
        dbdir = Person.objects.filter(permalink = writer_link)
        if dbdir.count() == 0:
            person = Person(
                name=writer_name, surname=writer_surname, is_writer=True,
                status=1, version=1, type=2, permalink=writer_link,
                imdb_code=writer_imdb, actor_popularity=0, director_popularity=0,
                actor_popularity_month=0, director_popularity_month=0,
                writer_popularity=0, writer_popularity_month=0)
            person.save(saved_by=2)
            film.writers.add(person)
            logger.debug("Saving Writer. ID=" + unicode(person))
        else:
            person = dbdir[0]
            person.is_writer = True                    
            person.save(saved_by=2)
            film.writers.add(person)
            logger.debug("Saving Writer. ID=" + unicode(person))
Example #30
def gallery(slug, text):
    result = {}

    m = re.match(META_REGEX, text)
    if m:
        for line in m.group(1).split('\n'):
            try:
                k, v = line.split(':', 1)
                result[k.strip()] = v.strip()
            except ValueError:
                continue

    gallery = result.get('gallery', slughifi(slug))

    if gallery.startswith('/'):
        gallery = os.path.basename(gallery)

    return gallery
Example #31
 def create_user(self, request, access, token, user_data):
     try:
         name = user_data['screen_name']
         slug_name = slughifi(name)
         try:
             user = User.objects.get(username=slug_name) #@UnusedVariable
         except:
             pass  # if no such user exists, nothing to do
         else:
             raise  # if it exists, raise so a different username gets chosen
     except:
         slug_name = self.identifier_from_data(user_data)
     
     email = '*****@*****.**' % slug_name #TODO
     
     try:
         website = user_data['url']
     except:
         website = 'http://twitter.com/%s' % slug_name
     
     gender = 'I'
     
     try:
         locale = user_data['lang']
     except:
         locale = 'es'
     
     birthday = None
     
     user = User(username=slug_name, email=email)
     user.set_unusable_password()
     user.save()
     
     user_profile = create_user_profile(user, user.username, servicio='Twitter')
     user_profile.web = website
     user_profile.sexo = gender
     user_profile.idioma = locale
     user_profile.nacimiento = birthday
     user_profile.save()
     
     self.login_user(request, user)
     return user
Example #32
def slugify(s, entities=True, decimal=True, hexadecimal=True, model=None, slug_field='slug', pk=None):
    s = smart_unicode(s)
    # we don't want a string > 40 characters
    if len(s) > 40:
        s = s[:40]

    s = slughifi(s)

    slug = s
    if model:  
        # return unique slug for a model (appending integer counter)
        def get_query():
            query = model.objects.filter(**{ slug_field: slug })
            if pk:
                query = query.exclude(pk=pk)
            return query
        counter = 2
        while get_query():
            slug = "%s-%s" % (s, counter)
            counter += 1
    return slug
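For comparison, a dependency-free sketch of the same uniqueness-by-counter idea, checking candidates against an in-memory set of taken slugs instead of a model queryset (illustrative only):

from slughifi import slughifi

def unique_slug(title, taken, max_length=40):
    # slugify the title, then append an integer counter until the
    # candidate slug is no longer in the taken set
    base = slughifi(title)[:max_length]
    slug, counter = base, 2
    while slug in taken:
        slug = "%s-%s" % (base, counter)
        counter += 1
    return slug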
Example #33
    price = row[10]
    idx = row[11]
    canonical = row[12]
    clr = row[13]

    # store publisher
    if not row[0] in publishers:
        publisher_idx += 1
        print('====')
        print(publisher)
        book_index = 0
        publishers.append(publisher)

    # create publisher dir
    publisher_path = 'publishers/{}_{}'.format(publisher_idx,
                                               slughifi(publisher))
    if not os.path.exists(publisher_path):
        os.makedirs(publisher_path)
    # create txt file for publisher
    publisher_filename = os.path.join(publisher_path, 'publisher.txt')
    publisher_file = open(publisher_filename, "w")
    publisher_file.write('Title: {}'.format(publisher))
    publisher_file.write("\n\n----\n\n")
    publisher_file.write('Publisher_url: {}'.format("/".join(
        book_url.split('/')[0:3])))
    publisher_file.write("\n\n----\n\n")
    publisher_file.close()

    print('----')
    print(title)
    book_slug = slughifi(title)
Example #34
def label_to_key( label ):
    # need to cut because of POSTRGES max column name length
    return slughifi.slughifi(label, True).replace('-', '_')[:63]
Example #35
File: dj.py Project: hchapman/WBOR
  def post(self):
    self.response.headers['Content-Type'] = 'text/json'
    action = self.request.get("action")

    if action == "add":
      self.response.headers['Content-Type'] = 'text/json'
      # asin is Amazon's special ID number.
      # unique to the product (but different versions of the same
      # thing will have different ASIN's, like a vinyl vs. a cd vs. a
      # special edition etc.)
      asin = self.request.get("asin")
      album = Album.get(asin=asin)
      if album:
        album.set_new()
        album.put()
        self.response.out.write(json.dumps({
          'msg': "Success, already existed. The album was re-set to new."
        }))
        return

      # Grab the product details from Amazon to save to our datastore.
      i = amazon.productSearch(asin)
      try:
        i = i[0]
      except IndexError:
        self.response.out.write(json.dumps({
          'err': ("An error occurred.  Please try again, or if this keeps "
                  "happening, select a different album.")
        }))
        return
      # this overly complicated code sets up the json data associated with
      # the album we're adding.  It pulls the appropriate values from the
      # XML received.
      json_data = {
        'small_pic': i.getElementsByTagName(
          "SmallImage")[0].getElementsByTagName("URL")[0].firstChild.nodeValue,
        'large_pic': i.getElementsByTagName(
          "LargeImage")[0].getElementsByTagName("URL")[0].firstChild.nodeValue,
        'artist': i.getElementsByTagName("Artist")[0].firstChild.nodeValue,
        'title': i.getElementsByTagName("Title")[0].firstChild.nodeValue,
        'asin': i.getElementsByTagName("ASIN")[0].firstChild.nodeValue,
        'tracks': [t.firstChild.nodeValue
                   for t in i.getElementsByTagName("Track")],
      }
      largeCover = urlfetch.fetch(json_data['large_pic']).content
      large_filetype = json_data['large_pic'][-4:].strip('.')
      smallCover = urlfetch.fetch(json_data['small_pic']).content
      small_filetype = json_data['small_pic'][-4:].strip('.')

      title = json_data['title']
      artist = json_data['artist']
      tracks = json_data['tracks']

    elif action == "makeNew":
      # We're marking an existing album as "new" again
      self.response.headers['Content-Type'] = 'text/json'
      key = self.request.get("key")
      try:
        album = Album.get(key)
        album.set_new()
        album.put()
      except:
        self.response.out.write(
            json.dumps({'err': "Album not found. Please try again."}))
        return

      self.response.out.write(json.dumps({'msg': "Made new."}))
      return

    elif action == "makeOld":
      # We're removing the "new" marking from an album
      self.response.headers['Content-Type'] = 'text/json'
      key = self.request.get("key")
      try:
        album = Album.get(key)
        album.unset_new()
        album.put()
      except NoSuchEntry:
        pass
      self.response.out.write(json.dumps({'msg': "Made old."}))
      return

    elif action == "manual":
      # The user has typed in the title, the artist, all track names,
      # and provided a cover image URL.
      tracks = self.request.get("track-list")
      tracks = [line.strip() for line in tracks.splitlines() if
                len(line.strip()) > 0]
      cover_url = self.request.get("cover_url")
      if not cover_url:
        cover_url = "/static/images/noalbumart.png"

      # Try to fetch the cover art, if possible
      try:
        largeCover = urlfetch.fetch(cover_url).content
      except urlfetch.ResponseTooLargeError:
        if self.request.get("ajax"):
          self.response.out.write(
            json.dumps({
                'msg': ("The image you provided was too large. "
                        "There is a 1MB limit on cover artwork. "
                        "Try a different version with a reasonable size."),
                'result': 1,}))
        else:
          self.session.add_flash(
              "The image you provided was too large. "
              "There is a 1MB limit on cover artwork. "
              "Try a different version with a reasonable size.")
          self.redirect("/dj/albums/")
        return
      except urlfetch.InvalidURLError:
        if self.request.get("ajax"):
          self.response.out.write(
            json.dumps({
                'msg': ("The URL you provided could not be downloaded. "
                        "Hit back and try again."),
                'result': 1,}))
        else:
          self.session.add_flash("The URL you provided could "
                                 "not be downloaded. "
                                 "Try again.")
          self.redirect("/dj/albums/")
        return
      except urlfetch.DownloadError:
        if self.request.get("ajax"):
          self.response.out.write(
            json.dumps({
              'msg': ("The URL you provided could not be downloaded. "
                      "Hit back and try again."),
              'result': 1,}))
        else:
          self.session.add_flash("The URL you provided could "
                                 "not be downloaded. Try again.")
          self.redirect("/dj/albums")
        return

      large_filetype = cover_url[-4:].strip('.')
      smallCover = images.resize(largeCover, 100, 100)
      small_filetype = large_filetype

      title = self.request.get('title')
      artist = self.request.get('artist')
      asin = None


    ## Create the actual objects and store them
    fn = "%s_%s"%(slughifi(artist), slughifi(title))
    # Create the file nodes in the blobstore
    # _blobinfo_uploaded_filename WILL change in the future.
    small_file = files.blobstore.create(
      mime_type=small_filetype,
      _blobinfo_uploaded_filename="%s_small.png"%fn)
    large_file = files.blobstore.create(
      mime_type=large_filetype,
      _blobinfo_uploaded_filename="%s_big.png"%fn)

    # Write the images
    with files.open(small_file, 'a') as small:
      small.write(smallCover)
    with files.open(large_file, 'a') as large:
      large.write(largeCover)

    files.finalize(small_file)
    files.finalize(large_file)

    cover_small=files.blobstore.get_blob_key(small_file)
    cover_large=files.blobstore.get_blob_key(large_file)

    # Finally, create the album
    album = Album.new(title=title,
                      artist=artist,
                      tracks=tracks,
                      asin=asin,
                      cover_small=cover_small,
                      cover_large=cover_large)
    album.put()

    if self.request.get("ajax"):
      self.response.out.write(
        json.dumps({
            'msg': ("The album \"%s\" by %s was successfully added."%
                    (title, artist)),
            'result': 0,}))
Example #36
import os
from os.path import join, splitext

from catalogue.models import Book
from mutagen import easyid3
from slughifi import slughifi

chosen_book_slugs = set()

for file_name in os.listdir('mp3'):
    base_name, ext = splitext(file_name)
    if ext != '.mp3':
        continue

    audio = easyid3.EasyID3(join('mp3', file_name))
    title = audio['title'][0]
    artist = title.split(',', 1)[0].strip()
    artist_slug = slughifi(artist)
    title_part = slughifi(title.rsplit(',', 1)[1].strip())

    print "--------------------"
    print "File: %s" % file_name
    print "Title: %s" % title
    print
    print "Matching books:"

    matching_books = [book for book in Book.tagged.with_all(artist_slug) if book.slug not in chosen_book_slugs]
    matching_books = [book for book in matching_books if title_part in book.slug]

    if len(matching_books) > 1:
        for i, book in enumerate(matching_books):
            print "%d: %s (%s)" % (i, book.title, ', '.join(tag.slug for tag in book.tags))
        print
Example #37
from lxml import etree
from slughifi import slughifi

from django.core.management import setup_environ
from wolnelektury import settings

setup_environ(settings)

from catalogue.models import Tag


doc = etree.parse('rodziny.xml')

for element in doc.findall('//span'):
    themes = [s.strip() for s in element.text.split(',')]

    element.text = u''

    for theme in themes:
        try:
            Tag.objects.get(slug=slughifi(theme))

            link = etree.SubElement(element, 'a', href=u'/katalog/%s' % slughifi(theme))
            link.text = theme
            link.tail = ', '
            last_link = link
        except:
            print "Pomijam %s" % slughifi(theme)

    last_link.tail = ''


doc.write('ok.xml', xml_declaration=False, pretty_print=True, encoding='utf-8')
Example #38
def migrate_file_from_hg(orm, fname, entry):
    fname = urlunquote(fname)
    print fname
    if fname.endswith('.xml'):
        fname = fname[:-4]
    title = file_to_title(fname)
    fname = slughifi(fname)

    # create all the needed objects
    # what if it already exists?
    book = orm.Book.objects.create(
        title=title,
        slug=fname)
    chunk = orm.Chunk.objects.create(
        book=book,
        number=1,
        slug='1')
    try:
        chunk.stage = orm.ChunkTag.objects.order_by('ordering')[0]
    except IndexError:
        chunk.stage = None

    maxrev = entry.filerev()
    gallery_link = None

    # this will fail if directory exists
    os.makedirs(os.path.join(settings.CATALOGUE_REPO_PATH, str(chunk.pk)))

    for rev in xrange(maxrev + 1):
        fctx = entry.filectx(rev)
        data = fctx.data()
        gallery_link = gallery(fname, data)
        data = plain_text(data)

        # get tags from description
        description = fctx.description().decode("utf-8", 'replace')
        tags = STAGE_TAGS_RE.findall(description)
        tags = [orm.ChunkTag.objects.get(slug=slug.strip()) for slug in tags]

        if tags:
            max_ordering = max(tags, key=lambda x: x.ordering).ordering
            try:
                chunk.stage = orm.ChunkTag.objects.filter(ordering__gt=max_ordering).order_by('ordering')[0]
            except IndexError:
                chunk.stage = None

        description = STAGE_TAGS_RE.sub('', description)

        author = author_name = author_email = None
        author_desc = fctx.user().decode("utf-8", 'replace')
        m = AUTHOR_RE.match(author_desc)
        if m:
            try:
                author = orm['auth.User'].objects.get(username=m.group(1), email=m.group(2))
            except orm['auth.User'].DoesNotExist:
                author_name = m.group(1)
                author_email = m.group(2)
        else:
            author_name = author_desc

        head = orm.ChunkChange.objects.create(
            tree=chunk,
            revision=rev + 1,
            created_at=datetime.datetime.fromtimestamp(fctx.date()[0]),
            description=description,
            author=author,
            author_name=author_name,
            author_email=author_email,
            parent=chunk.head
            )

        path = "%d/%d" % (chunk.pk, head.pk)
        abs_path = os.path.join(settings.CATALOGUE_REPO_PATH, path)
        f = open(abs_path, 'wb')
        f.write(compress(data))
        f.close()
        head.data = path

        head.tags = tags
        head.save()

        chunk.head = head

    chunk.save()
    if gallery_link:
        book.gallery = gallery_link
        book.save()