def increment_views(self):
    """Increment the number of views in the database.

    We avoid concurrency issues by incrementing JUST the views and
    not allowing modified_on to be updated automatically.

    :returns: the new in-memory view count when the row is unsaved;
        ``None`` after issuing the UPDATE for a persisted row.
    """
    if self.id is None:
        # Row not persisted yet; just bump the in-memory counter.
        self.views += 1
        return self.views

    # Don't raise an exception should concurrency problems occur.
    # Views will not actually be incremented in this case, but that's
    # relatively unimportant compared to rendering the page for the user.
    # We may be able to remove this after we improve our triggers to not
    # issue an UPDATE on media_fulltext unless one of its columns are
    # actually changed. Even when just media.views is updated, all the
    # columns in the corresponding media_fulltext row are updated, and
    # media_fulltext's MyISAM engine must lock the whole table to do so.
    transaction = DBSession.begin_nested()
    try:
        DBSession.query(self.__class__)\
            .filter(self.__class__.id == self.id)\
            .update({self.__class__.views: self.__class__.views + 1})
        transaction.commit()
    # `except X as e` works on Python 2.6+ and 3, unlike `except X, e`.
    except OperationalError as e:
        transaction.rollback()
        # (OperationalError) (1205, 'Lock wait timeout exceeded, try
        # restarting the transaction')
        # str(e) instead of the deprecated e.message attribute.
        if '1205' not in str(e):
            raise
def increment_views(self):
    """Increment the number of views in the database.

    We avoid concurrency issues by incrementing JUST the views and
    not allowing modified_on to be updated automatically.

    :returns: the new in-memory view count when the row is unsaved;
        ``None`` after issuing the UPDATE for a persisted row.
    """
    if self.id is None:
        # Row not persisted yet; just bump the in-memory counter.
        self.views += 1
        return self.views

    # Don't raise an exception should concurrency problems occur.
    # Views will not actually be incremented in this case, but that's
    # relatively unimportant compared to rendering the page for the user.
    # We may be able to remove this after we improve our triggers to not
    # issue an UPDATE on media_fulltext unless one of its columns are
    # actually changed. Even when just media.views is updated, all the
    # columns in the corresponding media_fulltext row are updated, and
    # media_fulltext's MyISAM engine must lock the whole table to do so.
    transaction = DBSession.begin_nested()
    try:
        DBSession.query(self.__class__)\
            .filter(self.__class__.id == self.id)\
            .update({self.__class__.views: self.__class__.views + 1})
        transaction.commit()
    # `except X as e` works on Python 2.6+ and 3, unlike `except X, e`.
    except exc.OperationalError as e:
        transaction.rollback()
        # (OperationalError) (1205, 'Lock wait timeout exceeded, try
        # restarting the transaction')
        # str(e) instead of the deprecated e.message attribute.
        if '1205' not in str(e):
            raise
def restore_necessary_files():
    """Move media files and thumbnails for DB-referenced objects back
    from the 'deleted' directory into their live locations.

    Uses the python models to discover which files are needed.
    """
    if not deleted_dir:
        return

    # Build (src-in-deleted-dir, live-destination) pairs.
    moves = []
    for media in DBSession.query(Media).all():
        for thumb in thumb_paths(media).values():
            moves.append((thumb.replace(m_img_dir, m_deleted_dir), thumb))
        for media_file in media.files:
            if media_file.file_path:
                moves.append((
                    media_file.file_path.replace(media_dir, m_deleted_dir),
                    media_file.file_path,
                ))
    for podcast in DBSession.query(Podcast).all():
        for thumb in thumb_paths(podcast).values():
            moves.append((thumb.replace(p_img_dir, p_deleted_dir), thumb))

    for src, dest in moves:
        if os.path.exists(src):
            if DEBUG:
                print("Moving %s to %s" % (src, dest))
            shutil.move(src, dest)
def fetch_and_create_tags(tag_names):
    """Return Tag rows for all the given names, creating any missing ones.

    Matching against existing tags is case-insensitive.

    :param tag_names: list of tag name strings
    :returns: list of Tag instances covering every requested name
    """
    remaining = tag_names[:]

    # Find tag names that already exist (case insensitive match)
    # and drop those names from our working list.
    lowered = [name.lower() for name in remaining]
    existing_tags = DBSession.query(Tag)\
        .filter(func.lower(Tag.name).in_(lowered)).all()
    for tag in existing_tags:
        for name in remaining[:]:
            if name.lower() == tag.name.lower():
                remaining.remove(name)
                break

    # Bulk-insert whatever is left, then re-query for the new rows.
    if remaining:
        new_tags = [{'name': name, 'slug': slugify(name)}
                    for name in remaining]
        DBSession.connection().execute(tags.insert(), new_tags)
        DBSession.flush()
        existing_tags += DBSession.query(Tag)\
            .filter(Tag.slug.in_([t['slug'] for t in new_tags])).all()

    return existing_tags
def backup_files(dump_dir):
    """Backup all DB-referenced files (media files, thumbs) to dump_dir.

    :param dump_dir: destination directory for the backup
    :returns: a ``(status, message)`` tuple; status 0 means success
    """
    # TODO: display errors when file operations fail
    if dump_dir == "/":
        return 1, "Dump Files directory should never be the root directory, '/'"

    # Normalize to a single trailing separator.
    dump_dir = dump_dir.rstrip(os.sep) + os.sep

    # Destination directories we will write to.
    media_thumb_dir = dump_dir + Media._thumb_dir
    podcast_thumb_dir = dump_dir + Podcast._thumb_dir
    media_files_dir = dump_dir + "media_files"

    # Seed with the default placeholder thumbnails.
    default_images = ["news.jpg", "newm.jpg", "newl.jpg"]
    media_thumbs = [m_img_dir + os.sep + img for img in default_images]
    podcast_thumbs = [p_img_dir + os.sep + img for img in default_images]
    media_files = []

    # Collect thumbs and files for every media row.
    for media in DBSession.query(Media).all():
        paths = [file_path(f) for f in media.files]
        media_files.extend(fp for fp in paths if fp)
        media_thumbs.extend(thumb_paths(media).values())

    # Collect thumbs for every podcast row.
    for podcast in DBSession.query(Podcast).all():
        podcast_thumbs.extend(thumb_paths(podcast).values())

    # Ensure the necessary directories exist and start out empty.
    assert os.path.isdir(dump_dir)
    for subdir in (media_thumb_dir, media_files_dir, podcast_thumb_dir):
        if not os.path.exists(subdir):
            os.mkdir(subdir)
        assert os.path.isdir(subdir)
        empty_dir(subdir)

    # Copy over all of the files.
    for sources, dest_dir in (
        (media_thumbs, media_thumb_dir),
        (media_files, media_files_dir),
        (podcast_thumbs, podcast_thumb_dir),
    ):
        for src in sources:
            if DEBUG:
                print("Copying %s to %s%s" % (src, dest_dir, os.sep))
            shutil.copy2(src, dest_dir)

    return (
        0,
        "%d thumbnails and %d media files successfully backed up"
        % (len(media_thumbs) + len(podcast_thumbs), len(media_files)),
    )
def backup_files(dump_dir):
    """Backup all DB-referenced files (media files, thumbs) to dump_dir.

    :param dump_dir: destination directory for the backup
    :returns: a ``(status, message)`` tuple; status 0 means success
    """
    # TODO: display errors when file operations fail
    if dump_dir == '/':
        return 1, "Dump Files directory should never be the root directory, '/'"

    # Normalize to a single trailing separator.
    dump_dir = dump_dir.rstrip(os.sep) + os.sep

    # Destination directories we will write to.
    media_thumb_dir = dump_dir + Media._thumb_dir
    podcast_thumb_dir = dump_dir + Podcast._thumb_dir
    media_files_dir = dump_dir + 'media_files'

    # Seed with the default placeholder thumbnails.
    default_images = ['news.jpg', 'newm.jpg', 'newl.jpg']
    media_thumbs = [m_img_dir + os.sep + img for img in default_images]
    podcast_thumbs = [p_img_dir + os.sep + img for img in default_images]
    media_files = []

    # Collect thumbs and files for every media row.
    for media in DBSession.query(Media).all():
        paths = [file_path(f) for f in media.files]
        media_files.extend(fp for fp in paths if fp)
        media_thumbs.extend(thumb_paths(media).values())

    # Collect thumbs for every podcast row.
    for podcast in DBSession.query(Podcast).all():
        podcast_thumbs.extend(thumb_paths(podcast).values())

    # Ensure the necessary directories exist and start out empty.
    assert os.path.isdir(dump_dir)
    for subdir in (media_thumb_dir, media_files_dir, podcast_thumb_dir):
        if not os.path.exists(subdir):
            os.mkdir(subdir)
        assert os.path.isdir(subdir)
        empty_dir(subdir)

    # Copy over all of the files.
    for sources, dest_dir in (
        (media_thumbs, media_thumb_dir),
        (media_files, media_files_dir),
        (podcast_thumbs, podcast_thumb_dir),
    ):
        for src in sources:
            if DEBUG:
                print("Copying %s to %s%s" % (src, dest_dir, os.sep))
            shutil.copy2(src, dest_dir)

    return 0, '%d thumbnails and %d media files successfully backed up' % \
        (len(media_thumbs) + len(podcast_thumbs), len(media_files))
def remove_unnecessary_files():
    """Move all media files and thumbnail files into the 'deleted' folder."""
    # XXX: don't run if deleted_dir is not set!
    if not deleted_dir:
        return

    for media in DBSession.query(Media).all():
        paths = thumb_paths(media).values()
        paths.extend(f.file_path for f in media.files)
        helpers.delete_files(paths, 'media')

    for podcast in DBSession.query(Podcast).all():
        helpers.delete_files(thumb_paths(podcast).values(), 'podcasts')
def get_available_slug(mapped_class, string, ignore=None, slug_attr='slug', slug_length=SLUG_LENGTH):
    """Return a unique slug based on the provided string.

    Works by appending an int in sequence starting with 2:

    1. awesome-stuff
    2. awesome-stuff-2
    3. awesome-stuff-3

    :param mapped_class: The ORM-controlled model that the slug is for
    :param string: A title, name, etc
    :type string: unicode
    :param ignore: A record which doesn't count as a collision
    :type ignore: Int ID, ``mapped_class`` instance or None
    :param slug_attr: Name of the slug column on ``mapped_class``
    :param slug_length: Maximum length of the generated slug
    :returns: A unique slug
    :rtype: unicode
    """
    if isinstance(ignore, mapped_class):
        ignore = ignore.id
    elif ignore is not None:
        ignore = int(ignore)

    base = slugify(string)
    candidate = base
    suffix_num = 2
    while True:
        collision = DBSession.query(mapped_class.id)\
            .filter(getattr(mapped_class, slug_attr) == candidate)\
            .filter(mapped_class.id != ignore)\
            .first()
        if not collision:
            return candidate
        # Append '-N', truncating the base so we stay within slug_length.
        suffix = u'-%s' % suffix_num
        candidate = base[:slug_length - len(suffix)] + suffix
        suffix_num += 1
def insert_settings(defaults):
    """Insert the given settings if they don't exist yet.

    XXX: Does not include any support for MultiSetting. This approach
         won't work for that. We'll need to use sqlalchemy-migrate.

    :type defaults: list
    :param defaults: Key and value pairs
    :rtype: list
    :returns: Any settings that have just been created.
    """
    inserted = []
    keys = [key for key, value in defaults]
    existing_settings = set(
        row[0] for row in
        DBSession.query(Setting.key).filter(Setting.key.in_(keys))
    )
    for key, value in defaults:
        if key in existing_settings:
            continue
        # Nested transaction: a concurrent insert of the same key only
        # rolls back this one setting, not the whole batch.
        transaction = DBSession.begin_nested()
        try:
            setting = Setting(key, value)
            DBSession.add(setting)
            transaction.commit()
            inserted.append(setting)
        except IntegrityError:
            transaction.rollback()
    if inserted:
        DBSession.commit()
    return inserted
def panda_retry(self, file_id, encoding_id, **kwargs):
    """Ask the Panda storage engine to retry a failed transcode.

    :param file_id: MediaFile ID whose encoding should be retried.
    :param encoding_id: The Panda encoding to retry.
    :returns: dict with ``success`` flag for the JSON response.
    """
    media_file = fetch_row(MediaFile, file_id)
    storage = DBSession.query(PandaStorage).first()
    storage.panda_helper().retry_transcode(media_file, encoding_id)
    return {'success': True}
def save(self, id, display_name, group_name, permissions, delete=None, **kwargs):
    """Save changes or create a new :class:`~mediacore.model.auth.Group` instance.

    :param id: Group ID. If ``"new"`` a new group is created.
    :type id: ``int`` or ``"new"``
    :returns: Redirect back to :meth:`index` after successful save.
    """
    group = fetch_row(Group, id)

    if delete:
        DBSession.delete(group)
        redirect(action='index', id=None)

    group.display_name = display_name
    group.group_name = group_name
    if permissions:
        matching = DBSession.query(Permission).filter(
            Permission.permission_id.in_(permissions))
        group.permissions = list(matching)
    else:
        group.permissions = []
    DBSession.add(group)
    redirect(action='index', id=None)
def get_available_slug(mapped_class, string, ignore=None):
    """Return a unique slug based on the provided string.

    Works by appending an int in sequence starting with 2:

    1. awesome-stuff
    2. awesome-stuff-2
    3. awesome-stuff-3

    :param mapped_class: The ORM-controlled model that the slug is for
    :param string: A title, name, etc
    :type string: unicode
    :param ignore: A record which doesn't count as a collision
    :type ignore: Int ID, ``mapped_class`` instance or None
    :returns: A unique slug
    :rtype: unicode
    """
    if isinstance(ignore, mapped_class):
        ignore = ignore.id
    elif ignore is not None:
        ignore = int(ignore)

    base = slugify(string)
    candidate = base
    suffix_num = 2
    while True:
        collision = DBSession.query(mapped_class.id)\
            .filter(mapped_class.slug == candidate)\
            .filter(mapped_class.id != ignore)\
            .first()
        if not collision:
            return candidate
        # Append '-N', truncating the base so we stay within SLUG_LENGTH.
        suffix = u'-%s' % suffix_num
        candidate = base[:SLUG_LENGTH - len(suffix)] + suffix
        suffix_num += 1
def disassociate_video_id(self, media_file, video_id):
    """Delete the meta rows pairing this MediaFile with a Panda video.

    The MediaCore::MediaFile -> Panda::Video pairing lives under a
    prefixed meta_key. This is sort of a perversion of the meta table,
    but hey, it works.
    """
    meta_key = u"%s%s" % (META_VIDEO_PREFIX, video_id)
    rows = DBSession.query(MediaFilesMeta)\
        .filter(MediaFilesMeta.media_files_id == media_file.id)\
        .filter(MediaFilesMeta.key == meta_key)
    for row in rows:
        DBSession.delete(row)
def main(parser, options, args):
    """Upload a public crossdomain.xml to every configured S3 bucket."""
    for engine in DBSession.query(AmazonS3Storage):
        bucket = engine.connect_to_bucket()
        key = Key(bucket)
        key.key = 'crossdomain.xml'
        key.set_contents_from_string(CONTENTS,
                                     {'Content-Type': 'application/xml'})
        key.set_acl('public-read')
    sys.exit(0)

if __name__ == "__main__":
    main(cmd.parser, cmd.options, cmd.args)
def comments_save(self, **kwargs):
    """Save :class:`~mediacore.forms.admin.settings.CommentsForm`."""
    old_word_list = c.settings['vulgarity_filtered_words'].value
    self._save(comments_form, values=kwargs)
    # Re-filter every existing comment now if the word list changed.
    if old_word_list != c.settings['vulgarity_filtered_words'].value:
        for comment in DBSession.query(Comment):
            comment.body = filter_vulgarity(comment.body)
    redirect(action='comments')
def index(self, page=1, search=None, media_filter=None, **kwargs):
    """List comments with pagination and filtering.

    :param page: Page number, defaults to 1.
    :type page: int
    :param search: Optional search term to filter by
    :type search: unicode or None
    :param media_filter: Optional media ID to filter by
    :type media_filter: int or None
    :rtype: dict
    :returns:
        comments
            The list of :class:`~mediacore.model.comments.Comment`
            instances for this page.
        edit_form
            The :class:`mediacore.forms.admin.comments.EditCommentForm`
            instance, to be rendered for each instance in ``comments``.
        search
            The given search term, if any
        search_form
            The :class:`~mediacore.forms.admin.SearchForm` instance
        media_filter
            The given podcast ID to filter by, if any
        media_filter_title
            The media title for rendering if a ``media_filter`` was
            specified.
    """
    query = Comment.query.trash(False)\
        .order_by(Comment.reviewed.asc(), Comment.created_on.desc())
    # This only works since we only have comments on one type of content.
    # It will need re-evaluation if we ever add others.
    query = query.options(orm.eagerload('media'))

    if search is not None:
        query = query.search(search)

    media_filter_title = media_filter
    if media_filter is not None:
        query = query.filter(Comment.media.has(Media.id == media_filter))
        media_filter_title = DBSession.query(Media.title).get(media_filter)
        media_filter = int(media_filter)

    return {
        'comments': query,
        'edit_form': edit_form,
        'media_filter': media_filter,
        'media_filter_title': media_filter_title,
        'search': search,
        'search_form': search_form,
    }
def index(self, page=1, search=None, media_filter=None, **kwargs):
    """List comments with pagination and filtering.

    :param page: Page number, defaults to 1.
    :type page: int
    :param search: Optional search term to filter by
    :type search: unicode or None
    :param media_filter: Optional media ID to filter by
    :type media_filter: int or None
    :rtype: dict
    :returns:
        comments
            The list of :class:`~mediacore.model.comments.Comment`
            instances for this page.
        edit_form
            The :class:`mediacore.forms.admin.comments.EditCommentForm`
            instance, to be rendered for each instance in ``comments``.
        search
            The given search term, if any
        search_form
            The :class:`~mediacore.forms.admin.SearchForm` instance
        media_filter
            The given podcast ID to filter by, if any
        media_filter_title
            The media title for rendering if a ``media_filter`` was
            specified.
    """
    query = Comment.query.trash(False)\
        .order_by(Comment.reviewed.asc(), Comment.created_on.desc())
    # This only works since we only have comments on one type of content.
    # It will need re-evaluation if we ever add others.
    query = query.options(orm.eagerload('media'))

    if search is not None:
        query = query.search(search)

    media_filter_title = media_filter
    if media_filter is not None:
        query = query.filter(Comment.media.has(Media.id == media_filter))
        media_filter_title = DBSession.query(Media.title).get(media_filter)
        media_filter = int(media_filter)

    return {
        'comments': query,
        'edit_form': edit_form,
        'media_filter': media_filter,
        'media_filter_title': media_filter_title,
        'search': search,
        'search_form': search_form,
    }
def index(self, page=1, **kw):
    """List podcasts with pagination.

    :param page: Page number, defaults to 1.
    :type page: int
    :rtype: Dict
    :returns:
        podcasts
            The list of :class:`~mediacore.model.podcasts.Podcast`
            instances for this page.
    """
    query = DBSession.query(Podcast)
    query = query.options(orm.undefer("media_count"))
    query = query.order_by(Podcast.title)
    return {'podcasts': query}
def index(self, page=1, search=None, podcast_filter=None, **kwargs):
    """List media with pagination and filtering.

    :param page: Page number, defaults to 1.
    :type page: int
    :param search: Optional search term to filter by
    :type search: unicode or None
    :param podcast_filter: Optional podcast to filter by
    :type podcast_filter: int or None
    :rtype: dict
    :returns:
        media
            The list of :class:`~mediacore.model.media.Media` instances
            for this page.
        search
            The given search term, if any
        search_form
            The :class:`~mediacore.forms.admin.SearchForm` instance
        podcast_filter
            The given podcast ID to filter by, if any
        podcast_filter_title
            The podcast name for rendering if a ``podcast_filter`` was
            specified.
        podcast_filter_form
            The :class:`~mediacore.forms.admin.media.PodcastFilterForm`
            instance.
    """
    query = Media.query.options(orm.undefer('comment_count_published'))

    if search:
        query = query.admin_search(search)
    else:
        query = query.order_by_status()\
            .order_by(Media.publish_on.desc(), Media.modified_on.desc())

    podcast_filter_title = podcast_filter
    if podcast_filter == 'Unfiled':
        # Only media not attached to any podcast.
        query = query.filter(~Media.podcast.has())
    elif podcast_filter is not None and podcast_filter != 'All Media':
        query = query.filter(Media.podcast.has(Podcast.id == podcast_filter))
        podcast_filter_title = DBSession.query(Podcast.title).get(podcast_filter)
        podcast_filter = int(podcast_filter)

    return {
        'media': query,
        'podcast_filter': podcast_filter,
        'podcast_filter_title': podcast_filter_title,
        'podcast_filter_form': podcast_filter_form,
        'search': search,
        'search_form': search_form,
    }
def panda_update(self, media_id=None, file_id=None, video_id=None, **kwargs):
    """Refresh Panda transcode status for one file or a media's files.

    :param media_id: Media whose files should all be refreshed.
    :param file_id: A single MediaFile to refresh (takes precedence).
    :param video_id: Optional Panda video ID passed through to the helper.
    :returns: Redirect back to the media edit page.
    """
    if file_id:
        media_file = fetch_row(MediaFile, file_id)
        media_files = [media_file]
    elif media_id:
        media = fetch_row(Media, media_id)
        media_files = media.files
    else:
        # Previously `media_files` was left unbound here, raising a
        # NameError; with no ID given there is simply nothing to update.
        media_files = []

    storage = DBSession.query(PandaStorage).first()
    for media_file in media_files:
        storage.panda_helper().video_status_update(media_file, video_id)

    redirect(controller='/admin/media', action='edit', id=media_id)
def index(self, page=1, **kwargs):
    """List users with pagination.

    :param page: Page number, defaults to 1.
    :type page: int
    :rtype: Dict
    :returns:
        users
            The list of :class:`~mediacore.model.auth.User`
            instances for this page.
    """
    query = DBSession.query(User)\
        .order_by(User.display_name, User.email_address)
    return {'users': query}
def get_s3_storage():
    """Helper for retrieving the current S3 Storage engine.

    We use this to get a boto connection to the configured bucket.
    The result is cached on the template context; the string 'None'
    caches a negative lookup (no enabled engine).
    """
    c = tmpl_context._current_obj()
    cached = getattr(c, '_s3_engine', '')
    if cached:
        return None if cached == 'None' else cached
    engine = DBSession.query(AmazonS3Storage)\
        .filter(AmazonS3Storage.enabled == True)\
        .first()
    c._s3_engine = engine or 'None'
    return engine
def index(self, page=1, **kw):
    """List podcasts with pagination.

    :param page: Page number, defaults to 1.
    :type page: int
    :rtype: Dict
    :returns:
        podcasts
            The list of :class:`~mediacore.model.podcasts.Podcast`
            instances for this page.
    """
    query = DBSession.query(Podcast)
    query = query.options(orm.undefer('media_count'))
    query = query.order_by(Podcast.title)
    return {'podcasts': query}
def index(self, page=1, **kwargs):
    """List groups with pagination.

    :param page: Page number, defaults to 1.
    :type page: int
    :rtype: Dict
    :returns:
        users
            The list of :class:`~mediacore.model.auth.Group`
            instances for this page.
    """
    query = DBSession.query(Group)\
        .order_by(Group.display_name, Group.group_name)
    return {'groups': query}
def func(*args, **kwargs):
    """Enforce HTTP Basic auth for an admin user before calling the action.

    :raises webob.exc.HTTPUnauthorized: when the header is missing,
        the scheme is not Basic, or the credentials are invalid.
    """
    environ = kwargs["environ"]
    if "HTTP_AUTHORIZATION" not in environ:
        raise webob.exc.HTTPUnauthorized().exception
    # maxsplit=1: the credentials blob must stay intact even if it
    # contains spaces (the original maxsplit=2 could over-split).
    method, credentials = environ["HTTP_AUTHORIZATION"].split(" ", 1)
    if method.strip().lower() != "basic":
        raise webob.exc.HTTPUnauthorized().exception
    # maxsplit=1: only the first ':' separates user from password, so
    # passwords containing ':' no longer raise ValueError.
    username, password = credentials.strip().decode("base64").split(":", 1)
    # Parameterized lookup — the original interpolated the raw username
    # into a SQL filter string, which was an SQL injection vector.
    user = DBSession.query(User).filter_by(user_name=username).first()
    if user is None or not user.has_permission("admin") \
            or not user.validate_password(password):
        raise webob.exc.HTTPUnauthorized().exception
    return action(*args, **kwargs)
def fetch_row(mapped_class, pk=None, extra_filter=None, **kwargs):
    """Fetch a single row from the database or else trigger a 404.

    Typical usage is to fetch a single row for display or editing::

        class PageController(object):
            @expose()
            def index(self, id):
                page = fetch_row(Page, id)
                return page.name

            @expose()
            def works_with_slugs_too(self, slug):
                page = fetch_row(Page, slug=slug)
                return page.name

    If the ``pk`` is string ``new`` then an empty instance of
    ``mapped_class`` is created and returned. This is helpful in admin
    controllers where you may reuse your *edit* action for *adding* too.

    :param mapped_class: An ORM-controlled model
    :param pk: A particular primary key to filter by.
    :type pk: int, ``None`` or ``"new"``
    :param extra_filter: Extra filter arguments.
    :param \*\*kwargs: Any extra args are treated as column names to
        filter by. See :meth:`sqlalchemy.orm.Query.filter_by`.
    :returns: An instance of ``mapped_class``.
    :raises webob.exc.HTTPNotFound: If no result is found
    """
    if pk == 'new':
        return mapped_class()

    query = DBSession.query(mapped_class)
    if pk is not None:
        # Filter on the first column of the primary key.
        mapper = class_mapper(mapped_class, compile=False)
        query = query.filter(mapper.primary_key[0] == pk)
    if kwargs:
        query = query.filter_by(**kwargs)
    if extra_filter is not None:
        query = query.filter(extra_filter)

    try:
        return query.one()
    except NoResultFound:
        raise webob.exc.HTTPNotFound().exception
def save(self, id, email_address, display_name, login_details, delete=None, **kwargs):
    """Save changes or create a new :class:`~mediacore.model.auth.User` instance.

    :param id: User ID. If ``"new"`` a new user is created.
    :type id: ``int`` or ``"new"``
    :returns: Redirect back to :meth:`index` after successful save.
    """
    user = fetch_row(User, id)

    if delete:
        DBSession.delete(user)
        redirect(action='index', id=None)

    user.display_name = display_name
    user.email_address = email_address
    user.user_name = login_details['user_name']

    password = login_details['password']
    password_changed = password is not None and password != ''
    if password_changed:
        user.password = password

    group_ids = login_details['groups']
    if group_ids:
        matching = DBSession.query(Group).filter(
            Group.group_id.in_(group_ids))
        user.groups = list(matching)
    else:
        user.groups = []

    DBSession.add(user)

    # Check if we're changing the logged in user's own password
    if user.id == request.perm.user.id and password_changed:
        DBSession.commit()
        # repoze.who sees the Unauthorized response and clears the cookie,
        # forcing a fresh login with the new password
        raise webob.exc.HTTPUnauthorized().exception

    redirect(action='index', id=None)
def __init__(self, *args, **kwargs):
    """Load category tree and per-category counts for every request."""
    super(CategoriesController, self).__init__(*args, **kwargs)
    c.categories = Category.query.order_by(Category.name).populated_tree()

    counts = dict(DBSession.query(Category.id,
                                  Category.media_count_published))
    c.category_counts = counts.copy()
    # Roll each category's count up into all of its ancestors.
    for cat, depth in c.categories.traverse():
        count = counts[cat.id]
        for ancestor in cat.ancestors():
            c.category_counts[ancestor.id] += count

    category_slug = request.environ['pylons.routes_dict'].get('slug', None)
    if category_slug:
        c.category = fetch_row(Category, slug=category_slug)
        c.breadcrumb = c.category.ancestors()
        c.breadcrumb.append(c.category)
def func(*args, **kwargs):
    """Enforce HTTP Basic auth for an admin user before calling the action.

    :raises webob.exc.HTTPUnauthorized: when the header is missing,
        the scheme is not Basic, or the credentials are invalid.
    """
    environ = kwargs['environ']
    if 'HTTP_AUTHORIZATION' not in environ:
        raise webob.exc.HTTPUnauthorized().exception
    # maxsplit=1: the credentials blob must stay intact even if it
    # contains spaces (the original maxsplit=2 could over-split).
    method, credentials = environ['HTTP_AUTHORIZATION'].split(" ", 1)
    if method.strip().lower() != 'basic':
        raise webob.exc.HTTPUnauthorized().exception
    # maxsplit=1: only the first ':' separates user from password, so
    # passwords containing ':' no longer raise ValueError.
    username, password = credentials.strip().decode('base64').split(":", 1)
    # Parameterized lookup — the original interpolated the raw username
    # into a SQL filter string, which was an SQL injection vector.
    user = DBSession.query(User).filter_by(user_name=username).first()
    if user is None or not user.has_permission('admin') \
            or not user.validate_password(password):
        raise webob.exc.HTTPUnauthorized().exception
    return action(*args, **kwargs)
def _info(self, media, podcast_slugs=None, include_embed=False):
    """Return a JSON-ready dict for the given media instance.

    :param media: the Media instance to serialize
    :param podcast_slugs: optional ``{podcast_id: slug}`` cache used to
        avoid a per-item DB query
    :param include_embed: include the embeddable player HTML when true
    """
    if media.podcast_id:
        media_url = url_for(controller='/media', action='view',
                            slug=media.slug,
                            podcast_slug=media.podcast.slug,
                            qualified=True)
    else:
        media_url = url_for(controller='/media', action='view',
                            slug=media.slug, qualified=True)

    # Resolve the podcast slug from the cache if we have one, falling
    # back to a scalar query.
    if media.podcast_id is None:
        podcast_slug = None
    elif podcast_slugs:
        podcast_slug = podcast_slugs[media.podcast_id]
    else:
        podcast_slug = DBSession.query(Podcast.slug)\
            .filter_by(id=media.podcast_id).scalar()

    thumbs = dict(
        (size, thumb(media, size, qualified=True))
        for size in config['thumb_sizes'][media._thumb_dir].iterkeys()
    )

    info = {
        'id': media.id,
        'slug': media.slug,
        'url': media_url,
        'title': media.title,
        'author': media.author.name,
        'type': media.type,
        'podcast': podcast_slug,
        'description': media.description,
        'description_plain': media.description_plain,
        'comment_count': media.comment_count_published,
        'publish_on': unicode(media.publish_on),
        'likes': media.likes,
        'views': media.views,
        'thumbs': thumbs,
        'categories': dict((cat.slug, cat.name)
                           for cat in list(media.categories)),
    }
    if include_embed:
        info['embed'] = unicode(helpers.embed_player(media))
    return info
def __before__(self, *args, **kwargs):
    """Load all our category data before each request."""
    BaseController.__before__(self, *args, **kwargs)
    c.categories = Category.query.order_by(Category.name).populated_tree()

    counts = dict(DBSession.query(Category.id,
                                  Category.media_count_published))
    c.category_counts = counts.copy()
    # Roll each category's nonzero count up into all of its ancestors.
    for cat, depth in c.categories.traverse():
        count = counts[cat.id]
        if count:
            for ancestor in cat.ancestors():
                c.category_counts[ancestor.id] += count

    category_slug = request.environ["pylons.routes_dict"].get("slug", None)
    if category_slug:
        c.category = fetch_row(Category, slug=category_slug)
        c.breadcrumb = c.category.ancestors()
        c.breadcrumb.append(c.category)
def save(self, id, email_address, display_name, login_details, delete=None, **kwargs):
    """Save changes or create a new :class:`~mediacore.model.auth.User` instance.

    :param id: User ID. If ``"new"`` a new user is created.
    :type id: ``int`` or ``"new"``
    :returns: Redirect back to :meth:`index` after successful save.
    """
    user = fetch_row(User, id)

    if delete:
        DBSession.delete(user)
        redirect(action='index', id=None)

    user.display_name = display_name
    user.email_address = email_address
    user.user_name = login_details['user_name']

    password = login_details['password']
    password_changed = password is not None and password != ''
    if password_changed:
        user.password = password

    group_ids = login_details['groups']
    if group_ids:
        matching = DBSession.query(Group).filter(
            Group.group_id.in_(group_ids))
        user.groups = list(matching)
    else:
        user.groups = []

    DBSession.add(user)

    # Check if we're changing the logged in user's own password
    logged_in_user = request.environ['repoze.who.identity']['user']
    if user.user_id == logged_in_user.user_id and password_changed:
        DBSession.commit()
        # repoze.who sees the Unauthorized response and clears the cookie,
        # forcing a fresh login with the new password
        raise webob.exc.HTTPUnauthorized().exception

    redirect(action='index', id=None)
def insert_settings(defaults):
    """Insert the given settings if they don't exist yet.

    XXX: Does not include any support for MultiSetting. This approach
         won't work for that. We'll need to use sqlalchemy-migrate.

    :type defaults: list
    :param defaults: Key and value pairs
    :rtype: list
    :returns: Any settings that have just been created.
    """
    inserted = []
    keys = [key for key, value in defaults]
    try:
        rows = DBSession.query(Setting.key)\
            .filter(Setting.key.in_(keys))
        existing_settings = set(row[0] for row in rows)
    except ProgrammingError:
        # If we are running paster setup-app on a fresh database with a
        # plugin which tries to use this function every time the
        # Environment.loaded event fires, the settings table will not
        # exist and this exception will be thrown, but its safe to ignore.
        # The settings will be created the next time the event fires,
        # which will likely be the first time the app server starts up.
        return inserted
    for key, value in defaults:
        if key in existing_settings:
            continue
        # Nested transaction: a concurrent insert of the same key only
        # rolls back this one setting, not the whole batch.
        transaction = DBSession.begin_nested()
        try:
            setting = Setting(key, value)
            DBSession.add(setting)
            transaction.commit()
            inserted.append(setting)
        except IntegrityError:
            transaction.rollback()
    if inserted:
        DBSession.commit()
    return inserted
def index(self, page=1, **kwargs):
    """List tags with pagination.

    :param page: Page number, defaults to 1.
    :type page: int
    :rtype: Dict
    :returns:
        tags
            The list of :class:`~mediacore.model.tags.Tag`
            instances for this page.
        tag_form
            The :class:`~mediacore.forms.admin.settings.tags.TagForm`
            instance.
    """
    query = DBSession.query(Tag)
    query = query.options(orm.undefer('media_count'))
    query = query.order_by(Tag.name)
    return {
        'tags': query,
        'tag_form': tag_form,
        'tag_row_form': tag_row_form,
    }
def add_panda_vars(**result):
    """Augment a template result dict with Panda encoding/video info.

    Adds per-file encoding and video dicts plus the profile id->name
    map; leaves the dict untouched when the media has no files or no
    PandaStorage engine is configured.
    """
    media = result['media']
    result['encoding_dicts'] = encoding_dicts = {}
    result['video_dicts'] = video_dicts = {}
    result['profile_names'] = {}
    result['display_panda_refresh_message'] = False

    if not media.files:
        return result

    storage = DBSession.query(PandaStorage).first()
    if not storage:
        return result

    helper = storage.panda_helper()
    for media_file in media.files:
        encoding_dicts[media_file.id] = \
            helper.get_associated_encoding_dicts(media_file)
        video_dicts[media_file.id] = \
            helper.get_associated_video_dicts(media_file)

    if video_dicts or encoding_dicts:
        result['profile_names'] = helper.get_profile_ids_names()

    return result
def save(self, id, display_name, group_name, permissions, delete=None, **kwargs):
    """Save changes or create a new :class:`~mediacore.model.auth.Group` instance.

    :param id: Group ID. If ``"new"`` a new group is created.
    :type id: ``int`` or ``"new"``
    :returns: Redirect back to :meth:`index` after successful save.
    """
    group = fetch_row(Group, id)

    if delete:
        DBSession.delete(group)
        redirect(action='index', id=None)

    group.display_name = display_name
    group.group_name = group_name
    if permissions:
        matching = DBSession.query(Permission)\
            .filter(Permission.permission_id.in_(permissions))
        group.permissions = list(matching)
    else:
        group.permissions = []
    DBSession.add(group)
    redirect(action='index', id=None)
def index(self, **kwargs):
    """List categories.

    :rtype: Dict
    :returns:
        categories
            The list of :class:`~mediacore.model.categories.Category`
            instances for this page.
        category_form
            The
            :class:`~mediacore.forms.admin.settings.categories.CategoryForm`
            instance.
    """
    categories = Category.query\
        .order_by(Category.name)\
        .options(orm.undefer("media_count"))\
        .populated_tree()
    tag_query = DBSession.query(Tag)\
        .options(orm.undefer("media_count"))\
        .order_by(Tag.name)
    return {
        'categories': categories,
        'category_form': category_form,
        'category_row_form': category_row_form,
        'tags': tag_query,
        'tag_form': tag_form,
        'tag_row_form': tag_row_form,
    }
def index(self, type=None, podcast=None, tag=None, category=None,
          search=None, max_age=None, min_age=None, order=None, offset=0,
          limit=10, published_after=None, published_before=None,
          featured=False, id=None, slug=None, include_embed=False,
          api_key=None, format="json", **kwargs):
    """Query for a list of media.

    :param type: Filter by '%s' or '%s'. Defaults to any type.
    :param podcast: A podcast slug (or slugs) to filter by. Use 0 to
        include only non-podcast media or 1 to include any podcast media.
        For multiple podcasts, separate the slugs with commas.
    :param tag: A tag slug to filter by.
    :param category: A category slug to filter by.
    :param search: A boolean search query. See
        http://dev.mysql.com/doc/refman/5.0/en/fulltext-boolean.html
    :param published_after: If given, only media published *on or after*
        this date is returned. The expected format is 'YYYY-MM-DD HH:MM:SS'
        (ISO 8601) and must include the year at a bare minimum.
    :param published_before: If given, only media published *on or before*
        this date is returned. The expected format is 'YYYY-MM-DD HH:MM:SS'
        (ISO 8601) and must include the year at a bare minimum.
    :param max_age: If given, only media published within this many days
        is returned. This is a convenience shortcut for publish_after and
        will override its value if both are given.
    :type max_age: int
    :param min_age: If given, only media published prior to this number of
        days ago will be returned. This is a convenience shortcut for
        publish_before and will override its value if both are given.
    :type min_age: int
    :param order: A column name and 'asc' or 'desc', seperated by a space.
        The column name can be any one of the returned columns. Defaults
        to newest media first (publish_on desc).
    :param offset: Where in the complete resultset to start returning
        results. Defaults to 0, the very beginning. This is useful if
        you've already fetched the first 50 results and want to fetch the
        next 50 and so on.
    :type offset: int
    :param limit: Number of results to return in each query. Defaults to
        10. The maximum allowed value defaults to 50 and is set via
        :attr:`request.settings['api_media_max_results']`.
    :type limit: int
    :param featured: If nonzero, the results will only include media from
        the configured featured category, if there is one.
    :type featured: bool
    :param include_embed: If nonzero, the HTML for the embeddable player
        is included for all results.
    :type include_embed: bool
    :param id: Filters the results to include the one item with the given
        ID. Note that we still return a list.
    :type id: int or None
    :param slug: Filters the results to include the one item with the
        given slug. Note that we still return a list.
    :type slug: unicode or None
    :param api_key: The api access key if required in settings
    :type api_key: unicode or None
    :raises APIException: If there is an user error in the query params.
    :rtype: JSON-ready dict
    :returns: The returned dict has the following fields:

        count (int)
            The total number of results that match this query.
        media (list of dicts)
            A list of **media_info** dicts, as generated by the
            :meth:`_info <mediacore.controllers.api.media.MediaController._info>`
            method. The number of dicts in this list will be the lesser
            of the number of matched items and the requested limit.
            **Note**: unless the 'include_embed' option is specified,
            The returned **media_info** dicts will not include the
            'embed' entry.

    """
    # Enforce the optional shared-secret API key before doing any work.
    if asbool(request.settings['api_secret_key_required']) \
        and api_key != request.settings['api_secret_key']:
        return dict(error=AUTHERROR)

    if format not in ("json", "mrss"):
        return dict(error=INVALIDFORMATERROR % format)

    # Only published media are ever exposed through the API; preload the
    # published comment count to avoid a per-item lazy load in _info().
    query = Media.query\
        .published()\
        .options(orm.undefer('comment_count_published'))

    # Basic filters
    if id:
        query = query.filter_by(id=id)
    if slug:
        query = query.filter_by(slug=slug)

    if type:
        query = query.filter_by(type=type)

    if podcast:
        # Accepts a comma-separated list of podcast slugs; filter via an
        # IN-subquery on the matching podcast ids.
        podcast_query = DBSession.query(Podcast.id)\
            .filter(Podcast.slug.in_(podcast.split(',')))
        query = query.filter(Media.podcast_id.in_(podcast_query))

    if tag:
        tag = fetch_row(Tag, slug=tag)
        query = query.filter(Media.tags.contains(tag))

    if category:
        category = fetch_row(Category, slug=category)
        query = query.filter(Media.categories.contains(category))

    # Relative-age shortcuts override any explicit dates given above.
    if max_age:
        published_after = datetime.now() - timedelta(days=int(max_age))
    if min_age:
        published_before = datetime.now() - timedelta(days=int(min_age))

    # FIXME: Parse the date and catch formatting problems before it
    #        it hits the database. Right now support for partial
    #        dates like '2010-02' is thanks to leniancy in MySQL.
    #        Hopefully this leniancy is common to Postgres etc.
    if published_after:
        query = query.filter(Media.publish_on >= published_after)
    if published_before:
        query = query.filter(Media.publish_on <= published_before)

    query = query.order_by(get_order_by(order, order_columns))

    # Search will supercede the ordering above
    if search:
        query = query.search(search)

    if featured:
        featured_cat = get_featured_category()
        if featured_cat:
            query = query.in_category(featured_cat)

    # Preload podcast slugs so we don't do n+1 queries
    podcast_slugs = dict(DBSession.query(Podcast.id, Podcast.slug))

    # Rudimentary pagination support
    start = int(offset)
    end = start + min(int(limit), int(request.settings['api_media_max_results']))

    if format == "mrss":
        request.override_template = "sitemaps/mrss.xml"
        return dict(
            media=query[start:end],
            title="Media Feed",
        )

    media = [
        self._info(m, podcast_slugs, include_embed)
        for m in query[start:end]
    ]

    return dict(
        media=media,
        count=query.count(),
    )
def _info(self, media, podcast_slugs=None, include_embed=False):
    """Return a **media_info** dict--a JSON-ready dict for describing a media instance.

    :rtype: JSON-ready dict
    :returns: The returned dict has the following fields:

        author (unicode)
            The name of the
            :attr:`author <mediacore.model.media.Media.author>` of the
            media instance.
        categories (dict of unicode)
            A JSON-ready dict representing the categories the media
            instance is in. Keys are the unique
            :attr:`slugs <mediacore.model.podcasts.Podcast.slug>` for
            each category, values are the human-readable
            :attr:`title <mediacore.model.podcasts.podcast.Title>` of
            that category.
        id (int)
            The numeric unique :attr:`id <mediacore.model.media.Media.id>`
            of the media instance.
        slug (unicode)
            The more human readable unique identifier
            (:attr:`slug <mediacore.model.media.Media.slug>`)
            of the media instance.
        url (unicode)
            A permalink (HTTP) to the MediaCore view page for the media
            instance.
        embed (unicode)
            HTML code that can be used to embed the video in another
            site.
        title (unicode)
            The :attr:`title <mediacore.model.media.Media.title>` of
            the media instance.
        type (string, one of ['%s', '%s'])
            The :attr:`type <mediacore.model.media.Media.type>` of the
            media instance
        podcast (unicode or None)
            The :attr:`slug <mediacore.model.podcasts.Podcast.slug>` of
            the :class:`podcast <mediacore.model.podcasts.Podcast>` that
            the media instance has been published under, or None
        description (unicode)
            An XHTML
            :attr:`description <mediacore.model.media.Media.description>`
            of the media instance.
        description_plain (unicode)
            A plain text
            :attr:`description <mediacore.model.media.Media.description_plain>`
            of the media instance.
        comment_count (int)
            The number of published comments on the media instance.
        publish_on (unicode)
            The date of publishing in "YYYY-MM-DD HH:MM:SS" (ISO 8601)
            format. e.g. "2010-02-16 15:06:49"
        likes (int)
            The number of :attr:`like votes
            <mediacore.model.media.Media.likes>` that the media instance
            has received.
        views (int)
            The number of :attr:`views
            <mediacore.model.media.Media.views>` that the media instance
            has received.
        thumbs (dict)
            A dict of dicts containing URLs, width and height of
            different sizes of thumbnails. The default sizes
            are 's', 'm' and 'l'. Using medium for example::

                medium_url = thumbs['m']['url']
                medium_width = thumbs['m']['x']
                medium_height = thumbs['m']['y']

    """
    # Podcast media get their canonical URL under the podcast; everything
    # else uses the plain media permalink.
    if media.podcast_id:
        media_url = url_for(controller='/media', action='view',
                            slug=media.slug,
                            podcast_slug=media.podcast.slug,
                            qualified=True)
    else:
        media_url = url_for_media(media, qualified=True)

    # Resolve the podcast slug: prefer the caller-supplied preloaded map
    # (avoids an n+1 query per item); fall back to a single-column query.
    if media.podcast_id is None:
        podcast_slug = None
    elif podcast_slugs:
        podcast_slug = podcast_slugs[media.podcast_id]
    else:
        podcast_slug = DBSession.query(Podcast.slug)\
            .filter_by(id=media.podcast_id).scalar()

    # Build fully-qualified thumbnail URLs for every configured size.
    thumbs = {}
    for size in config['thumb_sizes'][media._thumb_dir].iterkeys():
        thumbs[size] = thumb(media, size, qualified=True)

    info = dict(
        id=media.id,
        slug=media.slug,
        url=media_url,
        title=media.title,
        author=media.author.name,
        type=media.type,
        podcast=podcast_slug,
        description=media.description,
        description_plain=media.description_plain,
        comment_count=media.comment_count_published,
        publish_on=unicode(media.publish_on),
        likes=media.likes,
        views=media.views,
        thumbs=thumbs,
        categories=dict((c.slug, c.name) for c in list(media.categories)),
    )

    # The embed HTML is only rendered when explicitly requested.
    if include_embed:
        info['embed'] = unicode(helpers.embed_player(media))

    return info
def getStorageEngine():
    """Return the first enabled ``LocalFileStorage`` engine, or ``None``.

    Uses mapped column expressions rather than the original raw SQL
    string filter (``"enabled = 1 and engine_type = '...'"``): textual
    filters are deprecated (and removed without ``text()``) in modern
    SQLAlchemy, and column expressions get proper quoting/escaping.

    :rtype: StorageEngine or None
    """
    # NOTE(review): assumes `enabled` and `engine_type` are mapped
    # columns on StorageEngine, as the original SQL string implies.
    return DBSession.query(StorageEngine)\
        .filter(StorageEngine.enabled == True)\
        .filter(StorageEngine.engine_type == 'LocalFileStorage')\
        .first()
def custom_groups(cls, *columns):
    """Query all groups except the built-in virtual ones.

    :param columns: Optional columns/entities to select instead of the
        full :class:`Group` entity.
    :returns: an unfiltered-by-pagination query excluding the
        'anonymous' and 'authenticated' groups.
    """
    entities = columns if columns else (Group, )
    builtin_names = [u'anonymous', u'authenticated']
    return DBSession.query(*entities)\
        .filter(not_(Group.group_name.in_(builtin_names)))
def by_user_name(cls, username):
    """Return the single instance matching this user name, or None."""
    # TODO: Move this function to User.query
    matches = DBSession.query(cls).filter(cls.user_name == username)
    return matches.first()
def __before__(self, *args, **kwargs):
    """Load all our settings before each request."""
    BaseController.__before__(self, *args, **kwargs)
    # Local import, matching the original placement — presumably to
    # avoid an import cycle at module load time; confirm before moving.
    from mediacore.model import Setting
    settings_map = dict(DBSession.query(Setting.key, Setting))
    tmpl_context.settings = settings_map
def by_user_name(cls, username):
    """A class method that permits to search users
    based on their user_name attribute.
    """
    query = DBSession.query(cls)
    return query.filter(cls.user_name == username).first()
def by_email_address(cls, email):
    """Return the user registered with this email address, or None."""
    # TODO: Move this function to User.query
    return DBSession.query(cls)\
        .filter(cls.email_address == email)\
        .first()