def fetch_and_create_tags(tag_names):
    """Return a list of Tag instances matching the given display names.

    Existing tags are matched case-insensitively by name *and* by slug, so
    a new name whose slug already belongs to an existing tag reuses that
    tag instead of triggering a unique-constraint violation on insert.
    Any remaining names are bulk-inserted and re-queried.

    :param tag_names: The display :attr:`Tag.name` values.
    :type tag_names: list
    :returns: A list of :class:`Tag` instances.
    """
    # Map slug -> display name. This also collapses duplicate names that
    # slugify to the same slug, which would otherwise break the bulk insert.
    wanted = {}
    for name in tag_names:
        wanted[slugify(name)] = name

    lower_names = [name.lower() for name in wanted.itervalues()]
    existing_tags = DBSession.query(Tag).filter(sql.or_(
        func.lower(Tag.name).in_(lower_names),
        Tag.slug.in_(wanted.keys()),
    )).all()

    # Drop every wanted entry that an existing tag already satisfies,
    # whether the match was on slug or on case-insensitive name.
    for tag in existing_tags:
        wanted.pop(tag.slug, None)
        for slug, name in wanted.items():
            if name.lower() == tag.name.lower():
                del wanted[slug]

    # Create the tags that don't yet exist, then fetch the new rows.
    if wanted:
        new_rows = [{'name': name, 'slug': slug}
                    for slug, name in wanted.iteritems()]
        DBSession.connection().execute(tags.insert(), new_rows)
        DBSession.flush()
        existing_tags += DBSession.query(Tag).filter(
            Tag.slug.in_(wanted.keys())).all()

    return existing_tags
def podcast_from_feed(d, tags=False, save_files=False):
    """Create, persist and return a :class:`Podcast` built from a feed.

    :param d: A feedparser result dict with ``feed`` and ``entries`` keys.
    :param tags: Passed through to :func:`media_from_entry` for each entry.
    :param save_files: Passed through to :func:`media_from_entry`.
    :returns: The new, flushed :class:`Podcast` instance.
    """
    feed = d['feed']

    # Assume not explicit unless the feed says otherwise.
    explicit = False
    if 'itunes_explicit' in feed:
        explicit = bool(feed['itunes_explicit'])

    image = None
    if 'image' in feed:
        image = feed['image']['href']

    title = feed.get('title', u'')
    description = feed.get('summary', u'')
    subtitle = feed.get('subtitle', u'')

    # Feeds don't reliably carry author info, so use placeholders.
    author_name = u"PLACEHOLDER NAME"
    author_email = u"*****@*****.**"

    podcast = Podcast()
    podcast.slug = get_available_slug(Podcast, slugify(title), podcast)
    podcast.title = title
    podcast.subtitle = subtitle
    podcast.author = Author(author_name, author_email)
    podcast.description = description
    podcast.explicit = explicit
    DBSession.add(podcast)
    DBSession.flush()

    # Create thumbs from the feed image, falling back to the defaults.
    if image:
        temp_imagefile = tempfile.TemporaryFile()
        try:
            imagefile = urllib2.urlopen(image)
            try:
                temp_imagefile.write(imagefile.read())
            finally:
                # fix: the urlopen handle was previously never closed
                imagefile.close()
            temp_imagefile.seek(0)
            filename = urlparse.urlparse(image)[2]
            create_thumbs_for(podcast, temp_imagefile, filename)
        finally:
            temp_imagefile.close()
    else:
        create_default_thumbs_for(podcast)

    # Now add all of the entries.
    for entry in d['entries']:
        media = media_from_entry(entry, tags, save_files)
        media.podcast = podcast

    return podcast
def fetch_and_create_tags(tag_names):
    """Return a list of Tag instances that match the given names.

    Tag names that don't yet exist are created automatically and returned
    alongside the results that did already exist.

    If you try to create a new tag that would have the same slug as an
    already existing tag, the existing tag is used instead.

    :param tag_names: The display :attr:`Tag.name`
    :type tag_names: list
    :returns: A list of :class:`Tag` instances.
    :rtype: :class:`TagList` instance
    """
    # Operate on a copy: the matching loop below pops names out of the
    # list, and callers should not see their argument mutated.
    tag_names = list(tag_names)
    lower_names = [name.lower() for name in tag_names]
    slugs = [slugify(name) for name in lower_names]

    # Grab all the tags that exist already, whether its the name or slug
    # that matches. Slugs can be changed by the tag settings UI so we can't
    # rely on each tag name evaluating to the same slug every time.
    # Extend the TagList rather than rebinding it, so the documented
    # :class:`TagList` return type actually holds.
    results = TagList()
    results.extend(Tag.query.filter(sql.or_(
        func.lower(Tag.name).in_(lower_names),
        Tag.slug.in_(slugs))).all())

    # Filter out any tag names that already exist (case insensitive), and
    # any tag names that evaluate to slugs that already exist.
    for tag in results:
        # Remove the match from our three lists until its completely gone
        while True:
            try:
                try:
                    index = slugs.index(tag.slug)
                except ValueError:
                    index = lower_names.index(tag.name.lower())
                tag_names.pop(index)
                lower_names.pop(index)
                slugs.pop(index)
            except ValueError:
                break

    # Any remaining tag names need to be created.
    if tag_names:
        # We may still have multiple tag names which evaluate to the same
        # slug. Load them into a dict so that duplicates are overwritten.
        uniques = dict((slug, name) for slug, name in izip(slugs, tag_names))
        # Do a bulk insert to create the tag rows.
        new_tags = [{'name': n, 'slug': s} for s, n in uniques.iteritems()]
        DBSession.execute(tags.insert(), new_tags)
        DBSession.flush()
        # Query for our newly created rows and append them to our result set.
        results.extend(Tag.query.filter(Tag.slug.in_(uniques.keys())).all())

    return results
def fetch_and_create_tags(tag_names):
    """Return a list of Tag instances that match the given names.

    Tag names that don't yet exist are created automatically and returned
    alongside the results that did already exist.

    If you try to create a new tag that would have the same slug as an
    already existing tag, the existing tag is used instead.

    :param tag_names: The display :attr:`Tag.name`
    :type tag_names: list
    :returns: A list of :class:`Tag` instances.
    :rtype: :class:`TagList` instance
    """
    # Work on a copy so the pops below don't mutate the caller's list.
    tag_names = list(tag_names)
    lower_names = [name.lower() for name in tag_names]
    slugs = [slugify(name) for name in lower_names]

    # Grab all the tags that exist already, whether its the name or slug
    # that matches. Slugs can be changed by the tag settings UI so we can't
    # rely on each tag name evaluating to the same slug every time.
    # Extend instead of rebinding so the return value really is a TagList,
    # as documented above.
    results = TagList()
    results.extend(Tag.query.filter(sql.or_(
        func.lower(Tag.name).in_(lower_names),
        Tag.slug.in_(slugs))).all())

    # Filter out any tag names that already exist (case insensitive), and
    # any tag names that evaluate to slugs that already exist.
    for tag in results:
        # Remove the match from our three lists until its completely gone
        while True:
            try:
                try:
                    index = slugs.index(tag.slug)
                except ValueError:
                    index = lower_names.index(tag.name.lower())
                tag_names.pop(index)
                lower_names.pop(index)
                slugs.pop(index)
            except ValueError:
                break

    # Any remaining tag names need to be created.
    if tag_names:
        # We may still have multiple tag names which evaluate to the same
        # slug. Load them into a dict so that duplicates are overwritten.
        uniques = dict((slug, name) for slug, name in izip(slugs, tag_names))
        # Do a bulk insert to create the tag rows.
        new_tags = [{'name': n, 'slug': s} for s, n in uniques.iteritems()]
        DBSession.execute(tags.insert(), new_tags)
        DBSession.flush()
        # Query for our newly created rows and append them to our result set.
        results.extend(Tag.query.filter(Tag.slug.in_(uniques.keys())).all())

    return results
def fetch_and_create_tags(tag_names):
    """Look up tags by slug, inserting rows for any names not yet stored.

    :param tag_names: Display names for the tags.
    :type tag_names: list
    :returns: A list of :class:`Tag` instances, pre-existing ones first.
    """
    # Map each slug to its display name; when two names share a slug the
    # later one wins, exactly as repeated dict assignment would behave.
    slug_to_name = dict((slugify(name), name) for name in tag_names)

    found = DBSession.query(Tag).filter(
        Tag.slug.in_(slug_to_name.keys())).all()
    known_slugs = set(tag.slug for tag in found)

    # Anything whose slug isn't already in the table must be created.
    missing = [slug for slug in slug_to_name.keys()
               if slug not in known_slugs]
    rows = [{'name': slug_to_name[slug], 'slug': slug} for slug in missing]

    if rows:
        DBSession.connection().execute(tags.insert(), rows)
        DBSession.flush()
        # Re-query so we return mapped Tag instances, not raw insert rows.
        found += DBSession.query(Tag).filter(Tag.slug.in_(missing)).all()

    return found
def fetch_and_create_tags(tag_names):
    """Return a list of Tag instances that match the given names.

    Tag names that don't yet exist are created automatically and returned
    alongside the results that did already exist.

    If you try to create a new tag that would have the same slug as an
    already existing tag, the existing tag is used instead.

    :param tag_names: The display :attr:`Tag.name`
    :type tag_names: list
    :returns: A list of :class:`Tag` instances.
    :rtype: :class:`TagList` instance
    """
    # Copy the incoming list: the matching loop below pops names out of
    # it, and callers shouldn't see their argument mutated.
    tag_names = list(tag_names)
    lower_names = [name.lower() for name in tag_names]
    slugs = [slugify(name) for name in lower_names]

    results = TagList()
    matches = Tag.query.filter(sql.or_(func.lower(Tag.name).in_(lower_names),
                                       Tag.slug.in_(slugs)))

    for tag in matches:
        results.append(tag)
        # Remove the match from our three lists until its completely gone
        while True:
            try:
                try:
                    index = slugs.index(tag.slug)
                except ValueError:
                    index = lower_names.index(tag.name.lower())
                tag_names.pop(index)
                lower_names.pop(index)
                slugs.pop(index)
            except ValueError:
                break

    if tag_names:
        # Collapse duplicate slugs into one entry so the bulk insert can't
        # violate the unique slug constraint when two remaining names
        # slugify identically.
        uniques = dict(izip(slugs, tag_names))
        new_tags = [{'name': n, 'slug': s} for s, n in uniques.iteritems()]
        DBSession.execute(tags.insert(), new_tags)
        DBSession.flush()
        results += Tag.query.filter(Tag.slug.in_(uniques.keys())).all()

    return results
def fetch_and_create_tags(tag_names):
    """Return a list of Tag instances that match the given names.

    Tag names that don't yet exist are created automatically and returned
    alongside the results that did already exist.

    If you try to create a new tag that would have the same slug as an
    already existing tag, the existing tag is used instead.

    :param tag_names: The display :attr:`Tag.name`
    :type tag_names: list
    :returns: A list of :class:`Tag` instances.
    :rtype: :class:`TagList` instance
    """
    # Operate on a copy so the caller's list isn't mutated by the pops
    # in the matching loop below.
    tag_names = list(tag_names)
    lower_names = [name.lower() for name in tag_names]
    slugs = [slugify(name) for name in lower_names]

    results = TagList()
    matches = Tag.query.filter(
        sql.or_(func.lower(Tag.name).in_(lower_names),
                Tag.slug.in_(slugs)))

    for tag in matches:
        results.append(tag)
        # Remove the match from our three lists until its completely gone
        while True:
            try:
                try:
                    index = slugs.index(tag.slug)
                except ValueError:
                    index = lower_names.index(tag.name.lower())
                tag_names.pop(index)
                lower_names.pop(index)
                slugs.pop(index)
            except ValueError:
                break

    if tag_names:
        # Deduplicate by slug before inserting: two remaining names that
        # slugify identically would otherwise break the unique constraint.
        uniques = dict(izip(slugs, tag_names))
        new_tags = [{'name': n, 'slug': s} for s, n in uniques.iteritems()]
        DBSession.execute(tags.insert(), new_tags)
        DBSession.flush()
        results += Tag.query.filter(Tag.slug.in_(uniques.keys())).all()

    return results
def validate_slug(self, key, slug):
    """Normalize any assigned slug value through ``slugify``.

    Signature matches a SQLAlchemy ``@validates`` hook (attribute name in
    ``key``, incoming value in ``slug``) — presumably registered as one on
    the owning model; confirm against the class definition.

    :param key: the attribute name being assigned (unused)
    :param slug: the raw slug value
    :returns: the slugified form of ``slug``
    """
    return slugify(slug)
def media_from_entry(e, tags=False, save_files=False): # Get tags as a list of unicode objects. tags = [t['term'] for t in e['tags']] # Assume not explicit. explicit = 0 if 'itunes_explicit' in e: explicit = e['itunes_explicit'] # Find the duration, if it exists duration = u'' if 'itunes_duration' in e: try: duration = e['itunes_duration'] duration = duration_to_seconds(duration) except ValueError: duration = None # Find the first <img> tag in the summary, if there is one image = None m = img_regex.match(e['summary']) if m is not None: image = m.group(1)[1:-1] title = e['title'] slug = slugify(title) author_name = u"PLACEHOLDER NAME" author_email = u"*****@*****.**" if 'author_detail' in e: if 'name' in e['author_detail']: author_name = e['author_detail']['name'] if 'email' in e['author_detail']: author_email = e['author_detail']['email'] year, month, day, hour, minute, second = e['updated_parsed'][:6] updated = datetime(year, month, day, hour, minute, second) media = Media() media.slug = get_available_slug(Media, slug, media) media.title = e['title'] media.author = Author(author_name, author_email) media.description = e['summary'] media.notes = u'' if tags: media.set_tags(tags) else: media.set_categories(tags) media.publish_on = updated media.created_on = updated media.publishable = True media.reviewed = True media.duration = duration DBSession.add(media) DBSession.flush() # Create thumbs from image, or default thumbs created_images = False if image: temp_imagefile = tempfile.TemporaryFile() imagefile = urllib2.urlopen(image) temp_imagefile.write(imagefile.read()) temp_imagefile.seek(0) filename = urlparse.urlparse(image)[2] create_thumbs_for(media, temp_imagefile, filename) created_images = True if not created_images: create_default_thumbs_for(media) print "Loaded episode:", media # now add all of the files. for enc in e['enclosures']: mf = media_file_from_enclosure(enc, media, save_files) print "Loaded media file:", mf media.update_status() return media