def get_oel_releases(data):
    """Return one page of original-English-language releases.

    ``data`` must contain the pagination members checked by
    ``check_validate_range`` (notably ``offset``).
    """
    params = check_validate_range(data)
    paginator = release_view_items.get_releases(page=params['offset'])
    response = unpack_paginator(paginator)
    # Flatten the ORM rows into plain serializable dicts.
    response['items'] = unpack_releases(response['items'])
    return getDataResponse(response)
def get_series_id(data):
    """Build the full detail payload for one series, including the
    similar-series list.

    Requires ``data['id']``; delegates the heavy lifting to
    ``item_view_items.load_series_data``.
    """
    assert "id" in data, "You must specify a id to query for."
    # NOTE(review): a second get_series_id definition in this module unpacks
    # only 11 values from load_series_data (no similar_series) — confirm which
    # definition is current; only one can win at import time.
    (series, releases, watch, watchlists, progress, latest, latest_dict,
     most_recent, latest_str, rating, total_watches,
     similar_series) = item_view_items.load_series_data(data['id'])

    payload = unpack_series_page(series)
    payload['releases'] = unpack_releases(releases)
    payload['watch'] = watch
    payload['watchlists'] = watchlists
    payload['progress'] = progress
    # ``latest`` itself is unpacked but not returned; only the dict form is.
    payload['latest'] = latest_dict
    payload['most_recent'] = most_recent
    payload['latest_str'] = latest_str
    payload['rating'] = rating
    payload['total_watches'] = total_watches
    payload['similar_series'] = [
        {'id': sim_id, 'title': sim_title}
        for sim_title, sim_id in similar_series
    ]
    return getDataResponse(payload)
def get_series(data):
    """Return one alphabetical page of series entries.

    ``data`` must contain ``prefix`` (starting letter) and ``offset``
    (page number), validated by ``check_validate_range``.
    """
    params = check_validate_range(data)
    paginator = series_view_items.getSeries(
        letter=params['prefix'], page=params['offset'])
    response = unpack_paginator(paginator)
    response['items'] = unpack_series(response['items'])
    return getDataResponse(response)
def get_feeds(data):
    """Return one page of raw feed entries, newest first."""
    params = check_validate_range(data)
    query = Feeds.query.order_by(desc(Feeds.published))
    # Eager-load the relationships iterated below so the per-row list
    # comprehensions don't issue one extra query per feed entry.
    query = query.options(joinedload('tags')).options(joinedload('authors'))
    feed_page = query.paginate(
        params['offset'], app.config['SERIES_PER_PAGE'], False)
    response = unpack_paginator(feed_page)
    response['items'] = [
        {
            'title': entry.title,
            'contents': entry.contents,
            'guid': entry.guid,
            'linkurl': entry.linkurl,
            'published': entry.published,
            'updated': entry.updated,
            'srcname': entry.srcname,
            'region': entry.region,
            'tags': [t.tag for t in entry.tags],
            'authors': [a.name for a in entry.authors],
        }
        for entry in response['items']
    ]
    return getDataResponse(response)
def get_translated_releases(data):
    """Return one page of releases restricted to translated sources."""
    params = check_validate_range(data)
    paginator = release_view_items.get_releases(
        page=params['offset'], srctype='translated')
    response = unpack_paginator(paginator)
    response['items'] = unpack_releases(response['items'])
    return getDataResponse(response)
def get_publisher_id(data):
    """Look up a publisher and its associated series by numeric id.

    Returns an error response when no publisher matches.
    """
    assert "id" in data, "You must specify a id to query for."
    assert is_integer(data['id']), "The 'id' member must be an integer, or a string that can cleanly cast to one."
    pub_id = int(data['id'])
    publisher, series = item_view_items.get_publisher_id(pub_id)
    if not publisher:
        return getResponse(error=True, message='No item found for that ID!')
    return getDataResponse(unpack_tag_genre_publisher(publisher, series))
def get_author_id(data):
    """Look up an author and their associated series by numeric id.

    Returns an error response when no author matches.
    """
    assert "id" in data, "You must specify a id to query for."
    assert is_integer(data['id']), "The 'id' member must be an integer, or a string that can cleanly cast to one."
    author_id = int(data['id'])
    author, series = item_view_items.get_author(author_id)
    if not author:
        return getResponse(error=True, message='No item found for that ID!')
    return getDataResponse(unpack_artist_or_illustrator(author, series))
def get_groups(data):
    """Return one page of translator-group names matching a prefix."""
    params = check_validate_range(data)
    paginator = sequence_view_items.get_groups_entries(
        params['prefix'], params['offset'])
    response = unpack_paginator(paginator)
    response['items'] = [
        {'name': group.name, 'id': group.id}
        for group in response['items']
    ]
    return getDataResponse(response)
def get_genres(data):
    """Return one page of genre names matching a prefix."""
    params = check_validate_range(data)
    paginator = sequence_view_items.get_genre_entries(
        params['prefix'], params['offset'])
    response = unpack_paginator(paginator)
    response['items'] = [
        {'name': entry.genre, 'id': entry.id}
        for entry in response['items']
    ]
    return getDataResponse(response)
def get_series_id(data):
    """Build the detail payload for one series (no similar-series list).

    Requires ``data['id']``; delegates to
    ``item_view_items.load_series_data``.
    """
    assert "id" in data, "You must specify a id to query for."
    # NOTE(review): another get_series_id definition in this module unpacks 12
    # values (including similar_series) from load_series_data — confirm which
    # unpack arity matches the current helper; only one definition wins at
    # import time.
    (series, releases, watch, watchlists, progress, latest, latest_dict,
     most_recent, latest_str, rating,
     total_watches) = item_view_items.load_series_data(data['id'])

    payload = unpack_series_page(series)
    payload['releases'] = unpack_releases(releases)
    payload['watch'] = watch
    payload['watchlists'] = watchlists
    payload['progress'] = progress
    # ``latest`` itself is unpacked but not returned; only the dict form is.
    payload['latest'] = latest_dict
    payload['most_recent'] = most_recent
    payload['latest_str'] = latest_str
    payload['rating'] = rating
    payload['total_watches'] = total_watches
    return getDataResponse(payload)
def get_search_title(data):
    """Fuzzy-search series titles; return per-series match tuples.

    Requires a non-trivial string in ``data['title']``.
    """
    assert "title" in data, "You must specify a title to query for."
    assert isinstance(data['title'], str), "The 'title' member must be a string."
    assert len(data['title']) > 1, "You must specify a non-empty title to query for."

    matches, cleaned_term = search_views.title_search(data['title'])
    result_rows = [
        {
            'sid': sid,
            # Each match tuple exposes (match[3], match[2]); the meaning of
            # those indices is defined by search_views.title_search.
            'match': [(hit[3], hit[2]) for hit in hits['results']],
        }
        for sid, hits in matches.items()
    ]
    return getDataResponse({
        'cleaned_search': cleaned_term,
        'results': result_rows,
    })
def get_search_title(data):
    """Fuzzy title search returning the cleaned query plus matches.

    Requires a non-trivial string in ``data['title']``.
    """
    assert "title" in data, "You must specify a title to query for."
    assert isinstance(data['title'], str), "The 'title' member must be a string."
    assert len(data['title']) > 1, "You must specify a non-empty title to query for."

    search_hits, cleaned_term = search_views.title_search(data['title'])

    def _match_pairs(hit_group):
        # Tuple layout (index 3, index 2) is defined by title_search.
        return [(entry[3], entry[2]) for entry in hit_group['results']]

    payload = {
        'cleaned_search': cleaned_term,
        'results': [
            {'sid': sid, 'match': _match_pairs(hits)}
            for sid, hits in search_hits.items()
        ],
    }
    return getDataResponse(payload)
def get_feeds(data):
    """Return one page of feed entries ordered by publication date (desc)."""
    params = check_validate_range(data)
    feed_query = Feeds.query.order_by(desc(Feeds.published))
    # Pre-join tags and authors — both are iterated per row below, and
    # eager-loading avoids a query-per-entry pattern.
    feed_query = feed_query.options(joinedload('tags'))
    feed_query = feed_query.options(joinedload('authors'))
    page = feed_query.paginate(
        params['offset'], app.config['SERIES_PER_PAGE'], False)

    response = unpack_paginator(page)
    serialized = []
    for entry in response['items']:
        serialized.append({
            'title': entry.title,
            'contents': entry.contents,
            'guid': entry.guid,
            'linkurl': entry.linkurl,
            'published': entry.published,
            'updated': entry.updated,
            'srcname': entry.srcname,
            'region': entry.region,
            'tags': [t.tag for t in entry.tags],
            'authors': [a.name for a in entry.authors],
        })
    response['items'] = serialized
    return getDataResponse(response)
def get_search_advanced(data):
    """Run a filtered series search, optionally joining extra relations.

    ``data['include-results']`` selects additional columns/relations
    (description, covers, tags, genres) to include per series. At most
    100 rows are returned.
    """
    if not search_views.search_check_ok(data):
        return getResponse(error=True, message="Insufficent filter parameters!")

    queried_columns = [Series]
    col_names = ['id', 'title']
    join_on = []
    if 'include-results' in data:
        # NOTE(review): debug print preserved as-is (behavior-preserving
        # restyle) — consider routing through logging instead.
        print("Include results:", data['include-results'])
        wanted = data['include-results']
        if 'description' in wanted:
            col_names.append("description")
        # Relations get both a result column and an eager-load join.
        for relation in ("covers", "tags", "genres"):
            if relation in wanted:
                col_names.append(relation)
                join_on.append(relation)

    # These two columns are automatically inserted into the return dataset;
    # they must be named here so they are unpacked properly.
    col_names.extend(['latest_published', 'release_count'])

    query = search_views.do_advanced_search(
        data, queried_columns=queried_columns)
    for relation in join_on:
        query = query.options(joinedload(relation))
    rows = query.limit(100).all()

    ret = [
        {name: getattr(row, name) for name in col_names}
        for row in rows
    ]

    if 'covers' in join_on:
        for entry in ret:
            entry['covers'] = [
                {
                    # NOTE(review): URL scheme looks truncated ('://' with no
                    # 'http'/'https') — confirm against the consuming client.
                    'url': '://www.wlnupdates.com/cover-img/{}/'.format(cover.id),
                    'description': cover.description,
                    'volume': cover.volume,
                    'chapter': cover.chapter,
                }
                for cover in entry['covers']
            ]
    if 'tags' in join_on:
        for entry in ret:
            entry['tags'] = [
                {'id': tag.id, 'tag': tag.tag}
                for tag in entry['tags']
            ]
    if 'genres' in join_on:
        for entry in ret:
            entry['genres'] = [
                {'id': genre.id, 'genre': genre.genre}
                for genre in entry['genres']
            ]

    # Datetimes aren't JSON-serializable; convert to POSIX timestamps.
    for entry in ret:
        published = entry['latest_published']
        entry['latest_published'] = published.timestamp() if published else None

    return getDataResponse(ret)
def get_translated_series(data):
    """Return one alphabetical page of translated series entries."""
    params = check_validate_range(data)
    paginator = series_view_items.getSeries(
        letter=params['prefix'], page=params['offset'], type='translated')
    response = unpack_paginator(paginator)
    response['items'] = unpack_series(response['items'])
    return getDataResponse(response)
def get_search_advanced(data):
    """Advanced series search with optional extra relations in the result.

    Selection of extra data is driven by ``data['include-results']``;
    the result set is capped at 100 series.
    """
    if not search_views.search_check_ok(data):
        return getResponse(error=True, message="Insufficent filter parameters!")

    queried_columns = [Series]
    col_names = ['id', 'title']
    join_on = []
    if 'include-results' in data:
        # NOTE(review): debug print kept verbatim (behavior-preserving
        # restyle) — consider using logging.
        print("Include results:", data['include-results'])
        include = data['include-results']
        if 'description' in include:
            col_names.append("description")
        if 'covers' in include:
            col_names.append("covers")
            join_on.append("covers")
        if 'tags' in include:
            col_names.append("tags")
            join_on.append("tags")
        if 'genres' in include:
            col_names.append("genres")
            join_on.append("genres")

    # These two columns are automatically inserted into the return dataset;
    # naming them here lets the unpack loop below pick them up.
    col_names.extend(['latest_published', 'release_count'])

    series_query = search_views.do_advanced_search(
        data, queried_columns=queried_columns)
    for relation in join_on:
        series_query = series_query.options(joinedload(relation))
    matched = series_query.limit(100).all()

    results = []
    for row in matched:
        results.append({name: getattr(row, name) for name in col_names})

    def _covers(items):
        return [{
            # NOTE(review): URL scheme looks truncated ('://' with no
            # 'http'/'https') — confirm against the consuming client.
            'url': '://www.wlnupdates.com/cover-img/{}/'.format(c.id),
            'description': c.description,
            'volume': c.volume,
            'chapter': c.chapter,
        } for c in items]

    if 'covers' in join_on:
        for entry in results:
            entry['covers'] = _covers(entry['covers'])
    if 'tags' in join_on:
        for entry in results:
            entry['tags'] = [{'id': t.id, 'tag': t.tag} for t in entry['tags']]
    if 'genres' in join_on:
        for entry in results:
            entry['genres'] = [
                {'id': g.id, 'genre': g.genre} for g in entry['genres']
            ]

    # Datetimes aren't JSON-serializable; convert to POSIX timestamps.
    for entry in results:
        stamp = entry['latest_published']
        entry['latest_published'] = stamp.timestamp() if stamp else None

    return getDataResponse(results)
def enumerate_search_genres(data):
    """Return every known genre as (id, name, usage-count) tuples.

    ``data`` is accepted for handler-signature uniformity but unused.
    """
    genre_rows = search_views.get_genres()
    return getDataResponse([
        (row.id, row.genre, row.genre_instances)
        for row in genre_rows
    ])
def enumerate_search_tags(data):
    """Return every known tag as (id, name, usage-count) tuples.

    ``data`` is accepted for handler-signature uniformity but unused.
    """
    tag_rows = search_views.get_tags()
    return getDataResponse([
        (row.id, row.tag, row.tag_instances)
        for row in tag_rows
    ])
def get_group_id(data):
    """Return the detail payload for a translator group.

    Includes alternate names, active series, and paginated feed/release
    entries for the page in ``data.get('page', '1')``.
    """
    assert "id" in data, "You must specify a id to query for."
    assert is_integer(data['id']), "The 'id' member must be an integer, or a string that can cleanly cast to one."
    if 'page' in data:
        assert is_integer(data['page']), "The 'page' member must be an integer, or a string that can cleanly cast to one if it is present."

    group_id = int(data['id'])
    page_num = int(data.get('page', '1'))
    group, alt_names, feeds, releases_query, active = \
        item_view_items.get_group_id(group_id, page_num)
    if not group:
        return getResponse(
            error=True, message="Group with id %s not found!" % group_id)

    # Past-the-end pages raise NotFound; treat them as an empty page.
    try:
        feed_page = feeds.paginate(page_num, app.config['SERIES_PER_PAGE'])
    except werkzeug.exceptions.NotFound:
        feed_page = None
    try:
        release_page = releases_query.paginate(
            page_num, app.config['SERIES_PER_PAGE'])
    except werkzeug.exceptions.NotFound:
        release_page = None

    feed_items = [
        {
            'title': entry.title,
            'contents': entry.contents,
            'guid': entry.guid,
            'linkurl': entry.linkurl,
            'published': entry.published,
            'updated': entry.updated,
            'srcname': entry.srcname,
            'region': entry.region,
            'tags': [t.tag for t in entry.tags],
        }
        for entry in unpack_paginator(feed_page)['items']
    ]
    release_items = [
        {
            'published': rel.published,
            'volume': rel.volume,
            'chapter': rel.chapter,
            'fragment': rel.fragment,
            'postfix': rel.postfix,
            'include': rel.include,
            'srcurl': rel.srcurl,
        }
        for rel in unpack_paginator(release_page)['items']
    ]

    return getDataResponse({
        'group': group.name,
        'id': group.id,
        'site': group.site,
        'alternate-names': alt_names,
        'active-series': {series.id: series.title for series in active},
        'releases-paginated': release_items,
        'feed-paginated': feed_items,
    })
def get_group_id(data):
    """Look up a translator group by id and return its detail payload.

    The payload carries alternate names, the group's active series, and
    one page each of feed entries and releases.
    """
    assert "id" in data, "You must specify a id to query for."
    assert is_integer(data['id']), "The 'id' member must be an integer, or a string that can cleanly cast to one."
    if 'page' in data:
        assert is_integer(data['page']), "The 'page' member must be an integer, or a string that can cleanly cast to one if it is present."

    group_id = int(data['id'])
    page_num = int(data.get('page', '1'))
    (group, alt_names, feed_query, release_query,
     active_series) = item_view_items.get_group_id(group_id, page_num)
    if not group:
        return getResponse(
            error=True, message="Group with id %s not found!" % group_id)

    per_page = app.config['SERIES_PER_PAGE']

    def _safe_paginate(query):
        # Flask-SQLAlchemy raises NotFound for out-of-range pages;
        # map that to "no entries" instead of propagating a 404.
        try:
            return query.paginate(page_num, per_page)
        except werkzeug.exceptions.NotFound:
            return None

    feed_rows = unpack_paginator(_safe_paginate(feed_query))['items']
    release_rows = unpack_paginator(_safe_paginate(release_query))['items']

    feed_items = []
    for entry in feed_rows:
        feed_items.append({
            'title': entry.title,
            'contents': entry.contents,
            'guid': entry.guid,
            'linkurl': entry.linkurl,
            'published': entry.published,
            'updated': entry.updated,
            'srcname': entry.srcname,
            'region': entry.region,
            'tags': [t.tag for t in entry.tags],
        })

    release_items = []
    for rel in release_rows:
        release_items.append({
            'published': rel.published,
            'volume': rel.volume,
            'chapter': rel.chapter,
            'fragment': rel.fragment,
            'postfix': rel.postfix,
            'include': rel.include,
            'srcurl': rel.srcurl,
        })

    payload = {
        'group': group.name,
        'id': group.id,
        'site': group.site,
        'alternate-names': alt_names,
        'active-series': {s.id: s.title for s in active_series},
        'releases-paginated': release_items,
        'feed-paginated': feed_items,
    }
    return getDataResponse(payload)