Example No. 1
def track_view(request):
    """
    View individual track details

    This view has two levels of cache:
     - track_dict cache - caches the track data (should not change)
     - track_dict + queue_dict - invalidated every time this track is modified in the queue

    The track dicts that are fetched from the DB are cached with an infinite expiry
    time, as they will not change once the system is running.

    It might be wise to add a cache_clear to the import_tracks operation
    (currently this won't be needed, as the basic implementation is python_dict,
     which is invalidated on shutdown,
     but if we were going to use memcache or redis then this would be necessary)
    """
    id = request.matchdict['id']

    # Search for, find, and redirect to the full id for shortened track ids
    # This is something of a hack to allow ids shorter than 64 characters in tests
    if len(id) != 64:
        full_id = first(DBSession.query(Track.id).filter(Track.id.like('{0}%'.format(id))).first())
        if full_id and len(id) != len(full_id):
            raise HTTPFound(location=track_url(full_id))

    def get_track_dict(id):
        try:
            log.debug('cache gen - track_dict for {0}'.format(id))
            track_dict = get_track_dict_full(id)
            track_dict['title'] = track_title(track_dict['tags'])
            return track_dict
        except (KeyError, TypeError):
            return cache_none

    def get_track_and_queued_dict(id):
        track = cache.get_or_create("track_dict:{0}".format(id), lambda: get_track_dict(id))
        if not track:
            return cache_none
        log.debug('cache gen - track_queue_dict for {0}'.format(id))
        def queue_item_list_to_dict(queue_items):
            return [queue_item.to_dict('full', exclude_fields='track_id,session_owner') for queue_item in queue_items]
        queue_item_for_track_dict = subdict(queue_item_for_track(request, DBSession, track['id']), {'played', 'pending'})
        track['queue'] = {k: queue_item_list_to_dict(v) for k, v in queue_item_for_track_dict.items()}
        return track

    # TODO: Put some thought into the idea that a malicious caller could deliberately
    #       perform repeated calls, knowing a cache key could well be created, and
    #       take the system down with an 'out of memory'. Then again, if they want to
    #       attack this system with brains, there is very little we can do to prevent
    #       every attack vector.
    track = cache.get_or_create(track_key(id), lambda: get_track_and_queued_dict(id))
    if not track:
        raise action_error(message='track {0} not found'.format(id), code=404)

    log_event(request, track_id=id, title=track['title'])

    return action_ok(data={'track': track})
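The two cache levels described in the docstring can be pictured with a small dogpile.cache-style sketch. The memory backend, the queue_version counter, and the key formats below are assumptions for illustration; the original project wires these up elsewhere (in track_key and the queue update code).

# A minimal sketch of the two-level cache, assuming a dogpile.cache memory
# backend. `queue_version` and the key formats are illustrative assumptions,
# not the project's actual wiring.
from dogpile.cache import make_region

cache = make_region().configure('dogpile.cache.memory')
queue_version = 0  # assumed counter, bumped whenever the queue changes

def track_key(id):
    # Level 2 key: embeds the queue version, so any queue change produces a
    # new key and the combined track+queue dict is regenerated on the next
    # view, while the level 1 entry "track_dict:{id}" stays cached forever.
    return 'track_queue_dict:{0}:{1}'.format(id, queue_version)

Incrementing queue_version on a queue update effectively invalidates every level 2 entry without touching the level 1 track_dict entries.
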
Example No. 2
    def _get_source_filename(self, source_type):
        """
        Look up metadata for source_filename.
        From the metadata, look up the tags file (identifiable by its .txt extension).
        Return the relative path.

        This is somewhat reinventing the wheel, as we do have code in `processmedia2`
        to parse this, but we want to reduce code dependencies and this is a single,
        fairly understandable, self-contained one-off.
        """
        SOURCE_TYPE_EXTENSION_LOOKUP = {
            'tag': ('txt', ),
            'subtitles': ('ssa', 'srt'),
        }
        return os.path.join(
            self.path_source,
            first(
                filedata.get('relative')
                for filename, filedata in self._meta.get('scan', {}).items()
                if any(filename.endswith('.{}'.format(extension)) for extension in SOURCE_TYPE_EXTENSION_LOOKUP[source_type])
            ) or ''
        )
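For readers unfamiliar with the `first` helper used above, here is a self-contained sketch of the same lookup with a minimal stand-in for `first`; the real helper comes from the project's utility module, and the sample scan data is invented:

import os

def first(iterable, default=None):
    # Stand-in for the project's `first` helper: return the first item of an
    # iterable, or `default` when it is empty.
    return next(iter(iterable), default)

SOURCE_TYPE_EXTENSION_LOOKUP = {
    'tag': ('txt', ),
    'subtitles': ('ssa', 'srt'),
}
meta_scan = {  # invented example of the `self._meta['scan']` structure
    'example.ssa': {'relative': 'source/example.ssa'},
    'example.txt': {'relative': 'source/example.txt'},
}

def get_source_filename(path_source, source_type):
    # Pick the first scanned file whose extension matches the requested
    # source_type; fall back to '' when nothing matches.
    return os.path.join(
        path_source,
        first(
            filedata.get('relative')
            for filename, filedata in meta_scan.items()
            if any(filename.endswith('.{}'.format(extension))
                   for extension in SOURCE_TYPE_EXTENSION_LOOKUP[source_type])
        ) or ''
    )

# get_source_filename('/media', 'tag')        -> '/media/source/example.txt'
# get_source_filename('/media', 'subtitles')  -> '/media/source/example.ssa'
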
Example No. 3
    def _get_source_filename(self, source_type):
        """
        Look up metadata for source_filename.
        From the metadata, look up the tags file (identifiable by its .txt extension).
        Return the relative path.

        This is somewhat reinventing the wheel, as we do have code in `processmedia2`
        to parse this, but we want to reduce code dependencies and this is a single,
        fairly understandable, self-contained one-off.
        """
        SOURCE_TYPE_EXTENSION_LOOKUP = {
            'tag': ('txt', ),
            'subtitles': ('ssa', 'srt'),
        }
        return os.path.join(
            self.path_source,
            first(
                filedata.get('relative')
                for filename, filedata in self._meta.get('scan', {}).items()
                if any(
                    filename.endswith('.{}'.format(extension)) for extension in
                    SOURCE_TYPE_EXTENSION_LOOKUP[source_type])) or '')
Example No. 4
def attachment_location(track={}, attachment_type='preview'):
    # As published this called itself and would recurse forever; presumably it
    # wraps a plural helper (assumed name `attachment_locations`).
    return first(attachment_locations(track, attachment_type))
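Since the corrected line relies on a plural helper that this listing does not show, the sketch below fills in a plausible shape for it; `attachment_locations` and the track-dict layout are assumptions, not the project's confirmed API:

def attachment_locations(track={}, attachment_type='preview'):
    # Assumed plural helper: return every location of the requested attachment
    # type on the track dict (dict layout invented for illustration).
    return [
        attachment.get('location')
        for attachment in track.get('attachments', ())
        if attachment.get('type') == attachment_type
    ]
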
Example No. 5
def track_view(request):
    """
    View individual track details

    This view has two levels of cache:
     - track_dict cache - caches the track data (should not change)
     - track_dict + queue_dict - invalidated every time this track is modified in the queue

    The track dicts that are fetched from the DB are cached with an infinite expiry
    time, as they will not change once the system is running.

    It might be wise to add a cache_clear to the import_tracks operation
    (currently this won't be needed, as the basic implementation is python_dict,
     which is invalidated on shutdown,
     but if we were going to use memcache or redis then this would be necessary)
    """
    id = request.matchdict['id']

    # Search for, find, and redirect to the full id for shortened track ids
    # This is something of a hack to allow ids shorter than 64 characters in tests
    if len(id) != 64:
        full_id = first(
            DBSession.query(Track.id).filter(Track.id.like(
                '{0}%'.format(id))).first())
        if full_id and len(id) != len(full_id):
            raise HTTPFound(location=track_url(full_id))

    def get_track_dict(id):
        try:
            log.debug('cache gen - track_dict for {0}'.format(id))
            track_dict = get_track_dict_full(id)
            track_dict['title'] = track_title(track_dict['tags'])
            return track_dict
        except (KeyError, TypeError):
            return cache_none

    def get_track_and_queued_dict(id):
        track = cache.get_or_create("track_dict:{0}".format(id),
                                    lambda: get_track_dict(id))
        if not track:
            return cache_none
        log.debug('cache gen - track_queue_dict for {0}'.format(id))

        def queue_item_list_to_dict(queue_items):
            return [
                queue_item.to_dict('full',
                                   exclude_fields='track_id,session_owner')
                for queue_item in queue_items
            ]

        queue_item_for_track_dict = subdict(
            queue_item_for_track(request, DBSession, track['id']),
            {'played', 'pending'})
        track['queue'] = {
            k: queue_item_list_to_dict(v)
            for k, v in queue_item_for_track_dict.items()
        }
        return track

    # TODO: Put some thought into the idea that a malicious caller could deliberately
    #       perform repeated calls, knowing a cache key could well be created, and
    #       take the system down with an 'out of memory'. Then again, if they want to
    #       attack this system with brains, there is very little we can do to prevent
    #       every attack vector.
    track = cache.get_or_create(track_key(id),
                                lambda: get_track_and_queued_dict(id))
    if not track:
        raise action_error(message='track {0} not found'.format(id), code=404)

    log_event(request, track_id=id, title=track['title'])

    return action_ok(data={'track': track})