Example #1
def admin_toggle(request):
    if request.registry.settings.get('admin_locked'):
        raise action_error(message='additional admin users have been prohibited', code=403)
    request.session['admin'] = not request.session.get('admin', False)
    #log.debug('admin - {0} - {1}'.format(request.session['id'], request.session['admin']))
    log_event(request, admin=request.session['admin'])
    return action_ok()
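Every one of these views returns through action_ok or raises action_error. The project's real definitions are not shown in these snippets, so the following is only a minimal sketch of the envelope shape the helpers are assumed to provide, not the actual implementation:

class action_error(Exception):
    # Assumed shape: an exception carrying a user-facing message and an HTTP-style code
    def __init__(self, message='', code=500):
        super().__init__(message)
        self.message = message
        self.code = code

def action_ok(message='', data=None, code=200):
    # Assumed shape: a plain dict envelope the framework serialises for the client
    return {'status': 'ok', 'messages': [message] if message else [], 'data': data or {}, 'code': code}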
Example #2
def settings(request):
    """
    Surface settings as an API.
    This allows clients to query the server setup rather than having to hard-code bits into the clients.
    """
    if method_put_router(None, request):
        # With PUT requests, update the settings
        #  blocking changes only in 'production' mode was deemed a bit overzealous #request.registry.settings.get('karakara.server.mode')!='production'
        if request.registry.settings.get('karakara.server.mode') != 'test' and not is_admin(request):
            raise action_error(message='Settings modification for non-admin users forbidden', code=403)

        update_settings(request.registry.settings, request.params)

        send_socket_message(request, 'settings')  # Ensure that the player interface is notified of an update
        log_event(request, method='update', admin=is_admin(request))
    else:
        log_event(request, method='view', admin=is_admin(request))

    setting_regex = re.compile(request.registry.settings.get('api.settings.regex', 'TODOmatch_nothing_regex'))
    return action_ok(
        data={
            'settings': {
                setting_key: request.registry.settings.get(setting_key)
                for setting_key in request.registry.settings.keys()
                if setting_regex.match(setting_key)
            }
        }
    )
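The dict comprehension above only surfaces settings whose keys match a whitelist regex, so internal values such as database URLs are never exposed. Below is a self-contained sketch of that filtering; the pattern and settings values here are illustrative assumptions, not the project's real configuration:

import re

example_settings = {
    'karakara.search.list.threshold': 25,
    'karakara.server.mode': 'production',
    'sqlalchemy.url': 'sqlite:///:memory:',  # must never be surfaced
}
setting_regex = re.compile(r'karakara\.search\..*')

exposed = {
    key: example_settings[key]
    for key in example_settings
    if setting_regex.match(key)
}
assert exposed == {'karakara.search.list.threshold': 25}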
Example #3
def admin_lock(request):
    request.registry.settings[
        'admin_locked'] = not request.registry.settings.get(
            'admin_locked', False)
    #log.debug('admin locked - {0}'.format(request.registry.settings['admin_locked']))
    log_event(request, admin_locked=request.registry.settings['admin_locked'])
    return action_ok()
Example #4
def list(request):
    """
    Browse tracks by 'list'

    List all the tracks identified by trackids

    return search dict (see above) + tracks (a list of tracks with basic details)
    """
    search_params = get_search_params(request)
    log_event(request, tags=search_params.tags, keywords=search_params.keywords)
    cache_key = "search_list_{0}:{1}".format(search_version, search_cache_key(search_params))
    etag(request, cache_key)  # Abort if 'etag' match

    def get_list():
        action_return = search(search_params)
        log.debug('cache gen - get_list')

        _trackids = action_return['data']['trackids']

        tracks = DBSession.query(Track).\
                            filter(Track.id.in_(_trackids)).\
                            options(
                                joinedload(Track.tags),
                                joinedload(Track.attachments),
                                joinedload('tags.parent'),
                                #defer(Track.lyrics),
                            )

        action_return['data'].update({
            'tracks': [track.to_dict('full', exclude_fields='lyrics,attachments') for track in tracks],
        })
        return action_return

    return cache.get_or_create(cache_key, get_list)
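The etag(request, cache_key) call aborts before the expensive search whenever the client already holds the current representation. The helper itself is not part of these snippets; a minimal Pyramid/WebOb sketch of what it might look like:

from pyramid.httpexceptions import HTTPNotModified

def etag(request, etag_key):
    # Short-circuit with 304 if the client sent a matching If-None-Match header
    if etag_key in request.if_none_match:
        raise HTTPNotModified()
    # Otherwise tag the outgoing response so the next request can match
    request.response.etag = etag_key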
Example #5
def admin_toggle(request):
    if request.registry.settings.get('admin_locked'):
        raise action_error(
            message='additional admin users have been prohibited', code=403)
    request.session['admin'] = not request.session.get('admin', False)
    #log.debug('admin - {0} - {1}'.format(request.session['id'], request.session['admin']))
    log_event(request, admin=request.session['admin'])
    return action_ok()
Example #6
def track_view(request):
    """
    View individual track details

    This method has two levels of cache:
     - track_dict cache - cache the track data (should not change)
     - track_dict + queue_dict - every time this track is modified in the queue it will invalidate the cache

    The track dicts that are fetched from the DB are cached with an infinite expiry
    time as they will not change once the system is running

    It might be wise to add a cache_clear to the import_tracks operation
    (currently this won't be needed as the basic implementation is python_dict,
     which is invalidated on shutdown,
     but if we were going to use memcache or redis then this is necessary)
    """
    id = request.matchdict['id']

    # Search, find and redirect for shortened track ids
    # This is kind of a hack to allow ids shorter than 64 characters in tests
    if len(id) != 64:
        full_id = first(DBSession.query(Track.id).filter(Track.id.like('{0}%'.format(id))).first())
        if full_id and len(id) != len(full_id):
            raise HTTPFound(location=track_url(full_id))

    def get_track_dict(id):
        try:
            log.debug('cache gen - track_dict for {0}'.format(id))
            track_dict = get_track_dict_full(id)
            track_dict['title'] = track_title(track_dict['tags'])
            return track_dict
        except (KeyError, TypeError):
            return cache_none

    def get_track_and_queued_dict(id):
        track = cache.get_or_create("track_dict:{0}".format(id), lambda: get_track_dict(id))
        if not track:
            return cache_none
        log.debug('cache gen - track_queue_dict for {0}'.format(id))
        def queue_item_list_to_dict(queue_items):
            return [queue_item.to_dict('full', exclude_fields='track_id,session_owner') for queue_item in queue_items]
        queue_item_for_track_dict = subdict(queue_item_for_track(request, DBSession, track['id']), {'played', 'pending'})
        track['queue'] = {k: queue_item_list_to_dict(v) for k, v in queue_item_for_track_dict.items()}
        return track

    # TODO: Put some thought into the idea that a malicious user could deliberately
    #       perform repeated calls, knowing a new cache key will be created each time,
    #       and take the system down with an 'out of memory'. Then again, if they want
    #       to attack this system with brains there is very little we can do to prevent
    #       every attack vector.
    track = cache.get_or_create(track_key(id), lambda: get_track_and_queued_dict(id))
    if not track:
        raise action_error(message='track {0} not found'.format(id), code=404)

    log_event(request, track_id=id, title=track['title'])

    return action_ok(data={'track': track})
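track_view leans on a cache that can also remember that a lookup failed: get_track_dict returns the cache_none sentinel rather than None, so a missing track does not hit the DB on every request. The docstring notes the basic backend is a python dict; here is a minimal dict-backed sketch of that contract (an assumption, not the project's real cache module):

cache_none = object()  # sentinel meaning 'we looked, and there was nothing'
_store = {}

def get_or_create(key, creator):
    if key not in _store:
        _store[key] = creator()
    value = _store[key]
    # Cached misses come back as a falsy None, so 'if not track:' works above
    return None if value is cache_none else value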
Example #7
def session_created(event):
    """
    On first connect, log the device's user agent
    """
    # Quick hack to prevent flooding of the event logs by:
    #  - Users with cookies turned off (hopefully not too many of these in the real world).
    #  - Test users that create a new request every time.
    ip = event.request.environ.get('REMOTE_ADDR')
    if ip in known_ip_address:
        return
    known_ip_address.add(ip)

    log_event(
        event.request,
        device=httpagentparser.detect(event.request.environ.get('HTTP_USER_AGENT')),
    )
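known_ip_address is not defined in the snippet; for the dedup above to work across requests it would have to be module-level shared state, presumably along the lines of:

known_ip_address = set()  # assumption: one set per process, cleared on restart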
Example #8
def session_created(event):
    """
    On first connect, log the device's user agent
    """
    # Quick hack to prevent flooding of the event logs by:
    #  - Users with cookies turned off (hopefully not too many of these in the real world).
    #  - Test users that create a new request every time.
    ip = event.request.environ.get('REMOTE_ADDR')
    if ip in known_ip_address:
        return
    known_ip_address.add(ip)

    log_event(
        event.request,
        device=httpagentparser.detect(
            event.request.environ.get('HTTP_USER_AGENT')),
    )
Example #9
def feedback_view(request):
    """
    Feedback
    """
    if request.method == 'GET':
        if request.session.get('admin'):
            log.info('admin viewed feedback')
            return action_ok(data={'feedback': [feedback.to_dict() for feedback in DBSession.query(Feedback)]})
        return action_ok()

    if not request.params.get('details'):
        raise action_error('Please provide feedback details', code=400)

    feedback = Feedback()
    for field, value in request.params.items():
        try:
            setattr(feedback, field, value)
        except Exception:
            pass  # Silently skip params that do not map onto the Feedback model
    feedback.environ = strip_non_base_types(request.environ)
    DBSession.add(feedback)

    log.info('feedback - {0}'.format(request.params.get('details')))
    log_event(request, **request.params)
    return action_ok(message='Feedback received, thank you!')
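The try/except around setattr means unknown form fields are silently dropped instead of failing the whole submission. A stand-in demonstration of that behaviour; FeedbackStub is hypothetical, and the real SQLAlchemy Feedback model may fail differently:

class FeedbackStub:
    __slots__ = ('details', 'contact')  # only these attributes are assignable

params = {'details': 'Great party!', 'contact': 'a@b.c', 'bogus_field': 'x'}
feedback = FeedbackStub()
for field, value in params.items():
    try:
        setattr(feedback, field, value)
    except Exception:
        pass  # 'bogus_field' raises AttributeError and is skipped

assert feedback.details == 'Great party!'
assert not hasattr(feedback, 'bogus_field')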
Example #10
def settings(request):
    """
    Surface settings as an API.
    This allows clients to query the server setup rather than having to hard-code bits into the clients.
    """
    if method_put_router(None, request):
        # With PUT requests, update the settings
        #  blocking changes only in 'production' mode was deemed a bit overzealous #request.registry.settings.get('karakara.server.mode')!='production'
        if request.registry.settings.get(
                'karakara.server.mode') != 'test' and not is_admin(request):
            raise action_error(
                message='Settings modification for non-admin users forbidden',
                code=403)

        update_settings(request.registry.settings, request.params)

        send_socket_message(
            request, 'settings'
        )  # Ensure that the player interface is notified of an update
        log_event(request, method='update', admin=is_admin(request))
    else:
        log_event(request, method='view', admin=is_admin(request))

    setting_regex = re.compile(
        request.registry.settings.get('api.settings.regex',
                                      'TODOmatch_nothing_regex'))
    return action_ok(
        data={
            'settings': {
                setting_key: request.registry.settings.get(setting_key)
                for setting_key in request.registry.settings.keys()
                if setting_regex.match(setting_key)
            }
        })
Example #11
def track_view(request):
    """
    View individual track details

    This method has two levels of cache:
     - track_dict cache - cache the track data (should not change)
     - track_dict + queue_dict - every time this track is modified in the queue it will invalidate the cache

    The track dicts that are fetched from the DB are cached with an infinite expiry
    time as they will not change once the system is running

    It might be wise to add a cache_clear to the import_tracks operation
    (currently this won't be needed as the basic implementation is python_dict,
     which is invalidated on shutdown,
     but if we were going to use memcache or redis then this is necessary)
    """
    id = request.matchdict['id']

    # Search, find and redirect for shortened track ids
    # This is kind of a hack to allow ids shorter than 64 characters in tests
    if len(id) != 64:
        full_id = first(
            DBSession.query(Track.id).filter(Track.id.like(
                '{0}%'.format(id))).first())
        if full_id and len(id) != len(full_id):
            raise HTTPFound(location=track_url(full_id))

    def get_track_dict(id):
        try:
            log.debug('cache gen - track_dict for {0}'.format(id))
            track_dict = get_track_dict_full(id)
            track_dict['title'] = track_title(track_dict['tags'])
            return track_dict
        except (KeyError, TypeError):
            return cache_none

    def get_track_and_queued_dict(id):
        track = cache.get_or_create("track_dict:{0}".format(id),
                                    lambda: get_track_dict(id))
        if not track:
            return cache_none
        log.debug('cache gen - track_queue_dict for {0}'.format(id))

        def queue_item_list_to_dict(queue_items):
            return [
                queue_item.to_dict('full',
                                   exclude_fields='track_id,session_owner')
                for queue_item in queue_items
            ]

        queue_item_for_track_dict = subdict(
            queue_item_for_track(request, DBSession, track['id']),
            {'played', 'pending'})
        track['queue'] = {
            k: queue_item_list_to_dict(v)
            for k, v in queue_item_for_track_dict.items()
        }
        return track

    # TODO: Put some thought into the idea that a malicious user could deliberately
    #       perform repeated calls, knowing a new cache key will be created each time,
    #       and take the system down with an 'out of memory'. Then again, if they want
    #       to attack this system with brains there is very little we can do to prevent
    #       every attack vector.
    track = cache.get_or_create(track_key(id),
                                lambda: get_track_and_queued_dict(id))
    if not track:
        raise action_error(message='track {0} not found'.format(id), code=404)

    log_event(request, track_id=id, title=track['title'])

    return action_ok(data={'track': track})
Example #12
def admin_lock(request):
    request.registry.settings['admin_locked'] = not request.registry.settings.get('admin_locked', False)
    #log.debug('admin locked - {0}'.format(request.registry.settings['admin_locked']))
    log_event(request, admin_locked=request.registry.settings['admin_locked'])
    return action_ok()
Example #13
def tags(request):
    """
    Browse tracks by 'tag'

    If there is only one track then redirect to show the single track.
    If the number of tracks being browsed is below the configured threshold (karakara.search.list.threshold) then redirect to 'list'.

    Get all tags from all the trackids provided and count the number of occurrences.

    return search dict + sub_tags (a list of all tags with counts)
    """
    search_params = get_search_params(request)
    log_event(request, tags=search_params.tags, keywords=search_params.keywords)
    cache_key = "search_tags_{0}:{1}".format(search_version, search_cache_key(search_params))
    etag(request, cache_key)  # Abort if 'etag' match

    action_return = search(search_params)

    tags             = action_return['data']['tags']
    keywords         = action_return['data']['keywords']
    sub_tags_allowed = action_return['data']['sub_tags_allowed']
    trackids         = action_return['data']['trackids']


    # If this is an html request, we want to streamline browsing and remove redundant extra steps to get to the track list or track
    # TODO: I'm unsure if these 'raise' returns can be cached - right now this call always makes 2 hits to the cache: search() and get_action_return_with_sub_tags()
    if request.matchdict['format'] == 'html':
        # If there is only one track present - abort and redirect to single track view, there is no point in doing any more work
        if len(trackids) == 1:
            # TODO if the hostname has a port, the port is stripped ... WTF?!
            raise HTTPFound(location=track_url(trackids[0]))
        # If there is only a small list, we might as well just show them all
        if len(trackids) < request.registry.settings['karakara.search.list.threshold']:
            raise HTTPFound(location=search_url(tags=tags, keywords=keywords, route='search_list'))

    def get_action_return_with_sub_tags():
        log.debug('cache gen - subtags')
        # Get a list of all the tags for all the trackids,
        # group them by tag name, and
        # only allow tags in the allowed list (there could be hundreds of 'title' and 'from' tags); we just want the browsable ones
        alias_parent_tag = aliased(Tag)

        # The SQL engine cannot cope with an 'IN' clause with over 1000 entries.
        # The workaround is a convoluted way of batching the requests up into chunks
        # of at most 900 and merging the results.

        def get_sub_tags_batch(trackids):

            def slice_batch(trackids, batch_size):
                # Ceiling division avoids yielding a trailing empty batch when
                # len(trackids) is an exact multiple of batch_size
                for batch_number in range((len(trackids) + batch_size - 1) // batch_size):
                    yield trackids[batch_number * batch_size:(batch_number + 1) * batch_size]

            tags = {}
            for trackids_batch in slice_batch(trackids, 900):
                for tag, count in get_sub_tags(trackids_batch):
                    tag_dict = tag.to_dict('full')
                    id = tag_dict['id']
                    tags.setdefault(id, tag_dict).setdefault('count', 0)
                    tags[id]['count'] += count
            return tags.values()

        def get_sub_tags(trackids):
            return DBSession.query(Tag, func.count(TrackTagMapping.tag_id)).\
                join(TrackTagMapping).\
                join(alias_parent_tag, Tag.parent).\
                filter(TrackTagMapping.track_id.in_(trackids)).\
                filter(alias_parent_tag.name.in_(sub_tags_allowed)).\
                group_by('tag_1.id', alias_parent_tag.name, Tag.id).\
                order_by(alias_parent_tag.name, Tag.name).\
                options(joinedload(Tag.parent))

        # This if branch can probably be removed - we don't want different behaviour for different db engines
        #  TODO: need to check if postgres can actually handle this properly
        if request.registry.settings.get('sqlalchemy.url', '').startswith('sqlite'):
            sub_tags = list(get_sub_tags_batch(trackids))
        else:
            sub_tags = [update_dict(tag.to_dict('full'), {'count': count}) for tag, count in get_sub_tags(trackids)]

        # AllanC - RRRRRRRAAAAAAAAA!!!! Postgres creates an alias 'tag_1.id' under the hood, but won't actually return results unless it's in the group_by clause.
        #          It works without the tag_1.id in SQLite. So right now, the SQLite version is broken with 'tag_1' and postgres dies without it.
        #          Is there a way to alias this properly?
        # Tried aliases 'tag_1.id', 'tag_2.name'

        action_return['data'].update({
            'sub_tags': sub_tags,
        })
        return action_return

    return cache.get_or_create(cache_key, get_action_return_with_sub_tags)
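The chunk-and-merge logic above can be exercised without a database. A self-contained sketch; the (tag, count) rows are made up for illustration:

def slice_batch(items, batch_size):
    # Ceiling division: no trailing empty batch on exact multiples
    for batch_number in range((len(items) + batch_size - 1) // batch_size):
        yield items[batch_number * batch_size:(batch_number + 1) * batch_size]

assert list(slice_batch([1, 2, 3, 4], 2)) == [[1, 2], [3, 4]]
assert list(slice_batch([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]

# Merging per-batch counts for the same tag id, as get_sub_tags_batch does:
tags = {}
for batch_rows in ([({'id': 'anime'}, 3)], [({'id': 'anime'}, 2)]):
    for tag_dict, count in batch_rows:
        id = tag_dict['id']
        tags.setdefault(id, dict(tag_dict)).setdefault('count', 0)
        tags[id]['count'] += count
assert tags['anime']['count'] == 5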