def test_add_song_existing_with_replace_exisiting_tags():
    """Adding an existing song with replace_existing_tags=True must swap the
    old tag set for the new one while merging albums and artists."""
    existing = mixer.blend(
        SongDb,
        title="title",
        albums=mixer.blend(AlbumDb, name="album"),
        artists=mixer.blend(ArtistDb, name="artist"),
        tags=mixer.blend(TagDb, tag_type="type", value="value"),
    )
    result = song_logic.add(
        SongIn(
            title="title",
            length=1,
            album="album2",
            album_artist="artist2",
            artist="artist",
            tags=[TagIn(tag_type="type2", value="value2")],
        ),
        return_existing=True,
        update_existing=True,
        replace_existing_tags=True,
    )
    db.flush()
    assert result.id == existing.id
    assert len(result.albums) == 2
    assert len(result.tags) == 1
    assert orm.count(song for song in SongDb) == 1
    assert orm.count(tag for tag in TagDb) == 2
    assert orm.count(artist for artist in ArtistDb) == 2
    assert orm.count(album for album in AlbumDb) == 2
def test_sync_lastfm_scrobbles_since_last_sync_exact_date_time():
    """Syncing when the newest stored scrobble equals a Last.fm timestamp
    exactly should only import the scrobbles that came after it."""
    mixer.blend(ScrobbleDb, date=datetime.utcfromtimestamp(1584543120))
    scrobble_logic.sync_lastfm_scrobbles("Arararararagi")
    assert orm.count(scrobble for scrobble in ScrobbleDb) == 8
    assert orm.count(song for song in SongDb) > 0
    assert orm.count(artist for artist in ArtistDb) > 0
    assert orm.count(album for album in AlbumDb) > 0
def reload_songs(path=None) -> None:
    """
    Keeps music directory and database in sync.

    This is done by removing songs that exist in the database, but not on the
    filesystem. Also adds any songs found in the filesystem that are not
    present in the database.

    :param path: directory to scan for ``*.ogg`` files. Defaults to the
        configured music directory, resolved at call time (the old default
        ``app.config["PATH_MUSIC"]`` was evaluated once at import time, so
        later config changes were ignored).
    """
    if path is None:
        path = app.config["PATH_MUSIC"]
    songs_filesystem = [f.name for f in path.glob("*.ogg")]
    # insert all files if no songs in database
    if count(s for s in Song) <= 0:
        for filename in songs_filesystem:
            insert_song(path / filename)
    # generate queue if it is empty
    if count(s for s in Queue) <= 0:
        generate_queue()
    songs_db = select(song.filename for song in Song)[:]
    songs_to_add = [song for song in songs_filesystem if song not in songs_db]
    songs_to_remove = [song for song in songs_db if song not in songs_filesystem]
    for song in songs_to_remove:
        Song.get(filename=song).delete()
    app.logger.info(f"Adding songs: {songs_to_add}")
    app.logger.info(f"Removing songs: {songs_to_remove}")
    for filename in songs_to_add:
        # Bug fix: this previously built the target from
        # app.config["PATH_MUSIC"] directly, silently ignoring an
        # explicitly passed `path`.
        insert_song(path / filename)
def test_more_structures(self, db, rdf, expected):
    """Each new structure attached to molecule #3 must grow its set of
    reaction indexes to the counts given by `expected`."""
    molecule = db.Molecule[3]
    molecule.new_structure(nitrobrombenzene)
    assert count(molecule._structures.reaction_indexes) == expected[0]
    molecule.new_structure(third_nbb)
    assert count(molecule._structures.reaction_indexes) == expected[1]
def read(page=None, order="date"):
    """Return posts as a JSON-serializable dict, ordered and optionally paged.

    order: "date" sorts newest first; any order containing "thumbs" sorts by
    up-votes when "up" is in the order string, otherwise by down-votes.
    """
    query = Post.select()
    total = count(query)
    if order == "date":
        query = query.order_by(desc(Post.date_created))
    if "thumbs" in order:
        up = "up" in order
        query = query.order_by(
            lambda p: desc(count(t for t in p.thumbs if t.up == up)))
    selected = list(query.page(page)) if page else list(query)
    return {
        "count": total,
        "posts": [
            {
                "id": p.id,
                "content": p.content,
                "up": len([t for t in p.thumbs if t.up]),
                "down": len([t for t in p.thumbs if not t.up]),
                "date_created": p.date_created.timestamp(),
            }
            for p in selected
        ],
    }
def select_measurements_db(self,
                           measurement_type: str,
                           metadata: Mapping[str, str] = None,
                           references_this: Mapping[str, int] = None,
                           references_that: Mapping[str, int] = None,
                           ignore_invalidation: bool = False):
    """
    PonyORM select clause with where constraints on Data table (which
    correspond to MeasurementState classes)

    Parameters
    ----------
    measurement_type: str
        where measurement_type == {measurement_type} clauase
    metadata
        name -> value pairs; every pair must match at least one metadata
        row of a record for it to be selected.
    references_this
        ref_type -> id pairs matched against ``reference_two`` rows.
    references_that
        key -> id pairs matched against ``reference_one`` rows; the key is
        either a ref_type string or a (ref_type, ref_comment) tuple.
    ignore_invalidation
        When True, invalid/incomplete records are not filtered out.

    Returns
    -------
    MyDatabase.Data
        List of MyDatabase.Data instances. Each instance represents
        separate record in Data table of database.
    """
    if metadata is None:
        metadata = {}
    if references_this is None:
        references_this = {}
    if references_that is None:
        references_that = {}
    # Base query: match measurement type and this session's sample.
    # (Pony translates the `and` inside the lambda into SQL.)
    q = self.db.Data.select(
        lambda d: (d.measurement_type == measurement_type
                   and d.sample_name == self.sample_name))
    if not ignore_invalidation:
        # q2 = q.where(lambda d: 'invalidation' not in d.metadata.name)
        q2 = q.where(lambda d: (not d.invalid) and (not d.incomplete))
    else:
        q2 = q
    # NOTE(review): the lambdas below close over the loop variables k and v;
    # Pony evaluates external parameters when each .where() is processed —
    # confirm each filter pins its own k/v rather than the last iteration's.
    for k, v in metadata.items():
        q2 = q2.where(lambda d: count(True for m in d.metadata
                                      if m.name == k
                                      and m.value == str(v)) > 0)
    for k, v in references_this.items():
        q2 = q2.where(
            lambda d: count(True for r in d.reference_two
                            if r.ref_type == k and r.this.id == v) > 0)
    for k, v in references_that.items():
        if type(k) is str:
            q2 = q2.where(
                lambda d: count(True for r in d.reference_one
                                if r.ref_type == k and r.that.id == v) > 0)
        elif type(k) is tuple:
            # Tuple keys additionally constrain the reference comment.
            q2 = q2.where(lambda d: count(
                True for r in d.reference_one
                if (r.ref_type == k[0] and r.ref_comment == k[1])
                and r.that.id == v) > 0)
        print('reference:', k, ':', v)
    return q2
def topEditRatio(n: int = 10, minMessages: int = 30):
    """Return the top-n users ranked by the share of their messages that
    were edited.

    Users with fewer than `minMessages` messages, or whose last status is
    left/kicked/empty, are excluded.
    """
    excluded_statuses = ["left", "kicked", ""]
    eligible = select(
        u for u in User
        if (count(u.messages) >= minMessages)
        and (u.lastStatus not in excluded_statuses)
    )
    ranking = []
    for user in eligible:
        edited = count(select(x for x in user.messages if x.edited))
        ranking.append((user, edited / count(user.messages)))
    ranking.sort(key=lambda entry: entry[1], reverse=True)
    return ranking[:n]
def list_genres():
    """Subsonic endpoint: every non-empty genre with its song and album counts."""
    rows = select(
        (t.genre, count(), count(t.album)) for t in Track if t.genre)
    genres = [
        dict(value=genre, songCount=songs, albumCount=albums)
        for genre, songs, albums in rows
    ]
    return request.formatter("genres", dict(genre=genres))
def test_scrobble_without_date():
    """Scrobbling without an explicit date must auto-fill the date."""
    result = scrobble_logic.scrobble(
        ScrobbleIn(
            title="title",
            length=1,
            album="album",
            artist="artist1",
            tags=[TagIn(tag_type="type", value="tag")],
        )
    )
    assert orm.count(song for song in SongDb) == 1
    assert orm.count(scrobble for scrobble in ScrobbleDb) == 1
    assert result.date is not None
def list_genres():
    """Subsonic endpoint: list all genres with song and album counts."""
    genre_rows = select(
        (t.genre, count(), count(t.album)) for t in Track if t.genre)
    payload = dict(
        genre=[
            dict(value=g, songCount=songs, albumCount=albums)
            for g, songs, albums in genre_rows
        ]
    )
    return request.formatter("genres", payload)
def test_migrateLegacyInverterTableToPony_10records(self):
    """Migrate a 10-record excerpt of the legacy inverter table and check
    that the first migrated registry matches the first legacy row."""
    # Get data to migrate
    # Assumed there on db
    numrecords = 10
    # TODO fix order by time or extract first record plant and inverter_name from pony
    plantName = 'Alcolea'
    inversorName = 'inversor1'
    migrateLegacyInverterTableToPony(self.createConfig(), excerpt=True)
    with self.createPlantmonitorDB() as db:
        # retrieve expected
        curr = db._client.cursor()
        #curr.execute("select * from sistema_inversor where order by location, inverter_name, time desc limit 10;")
        curr.execute(
            "select distinct on (time, location, inverter_name) * from sistema_inversor where location = '{}' and inverter_name = '{}' limit 1;"
            .format(plantName, inversorName))
        expectedFirstRegistry = list(curr.fetchone())
    with orm.db_session:
        numplants = orm.count(p for p in self.pony.db.Plant)
        numinverters = orm.count(i for i in self.pony.db.Inverter)
        numregistries = orm.count(r for r in self.pony.db.InverterRegistry)
        # Every inverter should have contributed `numrecords` registries.
        self.assertEqual(numinverters * numrecords, numregistries)
    # Legacy rows are naive timestamps; the pony schema stores UTC.
    expectedTime = expectedFirstRegistry[0].replace(tzinfo=dt.timezone.utc)
    expectedPlant = expectedFirstRegistry[2]
    expectedInverter = expectedFirstRegistry[1]
    # Energy is recombined from two 16-bit words (columns 8 and 9).
    expectedEnergy = int(
        round((expectedFirstRegistry[8] << 16) + expectedFirstRegistry[9]))
    # NOTE(review): temperature appears to be stored scaled by 100 in the
    # pony schema — confirm against the InverterRegistry definition.
    expectedTempInv = expectedFirstRegistry[-1] * 100
    expectedMigrateRegistryList = [
        expectedTime, expectedPlant, expectedInverter, expectedEnergy,
        expectedTempInv
    ]
    with orm.db_session:
        query = orm.select(
            r for r in self.pony.db.InverterRegistry
            if r.inverter.name == expectedInverter
            and r.inverter.plant.name == expectedPlant).order_by(
                InverterRegistry.time)
        migratedRegistry = query.first()
        id, time, power, energy, *migratedRegistryList, temperature_dc = list(
            migratedRegistry.to_dict().values())
        migratedRegistryList = [
            time, migratedRegistry.inverter.plant.name,
            migratedRegistry.inverter.name, energy, temperature_dc
        ]
        self.assertListEqual(migratedRegistryList,
                             expectedMigrateRegistryList)
def select_measurements_db(self,
                           measurement_type: str,
                           metadata=None,
                           references_this=None,
                           references_that=None,
                           ignore_invalidation=False):
    """
    Selecting records from database according to parameters provided

    Parameters
    ----------
    measurement_type
        Exact match on the Data.measurement_type column.
    metadata
        name -> value pairs; every pair must match at least one metadata
        row of a record for it to be selected.
    references_this
        ref_type -> id pairs matched against ``reference_two`` rows.
    references_that
        key -> id pairs matched against ``reference_one`` rows; the key is
        either a ref_type string or a (ref_type, ref_comment) tuple.
    ignore_invalidation
        When True, invalid/incomplete records are not filtered out.

    Returns
    -------
    list[MyDatabase.Data]
        List of MyDatabase.Data instances. Each instance represents
        separate record in Data table of database.
    """
    # Bug fix: the old signature used mutable default arguments ({}),
    # which are created once and shared across all calls; use the
    # standard None-sentinel pattern instead (backward compatible).
    if metadata is None:
        metadata = {}
    if references_this is None:
        references_this = {}
    if references_that is None:
        references_that = {}
    # Base query: match measurement type and this session's sample.
    q = self.db.Data.select(
        lambda d: (d.measurement_type == measurement_type
                   and d.sample_name == self.sample_name))
    if not ignore_invalidation:
        # q2 = q.where(lambda d: 'invalidation' not in d.metadata.name)
        q2 = q.where(lambda d: (not d.invalid) and (not d.incomplete))
    else:
        q2 = q
    for k, v in metadata.items():
        q2 = q2.where(lambda d: count(True for m in d.metadata
                                      if m.name == k
                                      and m.value == str(v)) > 0)
    for k, v in references_this.items():
        q2 = q2.where(
            lambda d: count(True for r in d.reference_two
                            if r.ref_type == k and r.this.id == v) > 0)
    for k, v in references_that.items():
        if type(k) is str:
            q2 = q2.where(
                lambda d: count(True for r in d.reference_one
                                if r.ref_type == k and r.that.id == v) > 0)
        elif type(k) is tuple:
            # Tuple keys additionally constrain the reference comment.
            q2 = q2.where(lambda d: count(
                True for r in d.reference_one
                if (r.ref_type == k[0] and r.ref_comment == k[1])
                and r.that.id == v) > 0)
        print('reference:', k, ':', v)
    return q2
def test_fullMigration_excerpt(self):
    """An excerpt migration must load the expected number of inverter and
    meter registries."""
    expected_inverter_registries = 60
    expected_meter_registries = 70
    migrateLegacyToPony(self.createConfig(), excerpt=True)
    with orm.db_session:
        inverter_registries = orm.count(
            r for r in self.pony.db.InverterRegistry)
        self.assertEqual(inverter_registries, expected_inverter_registries)
        meter_registries = orm.count(r for r in self.pony.db.MeterRegistry)
        self.assertEqual(meter_registries, expected_meter_registries)
def test_scrobble_with_date():
    """A scrobble created with an explicit date must keep that exact date."""
    when = datetime.now()
    result = scrobble_logic.scrobble(
        ScrobbleIn(
            title="title",
            length=1,
            album="album",
            artist="artist1",
            tags=[TagIn(tag_type="type", value="tag")],
            date=when,
        )
    )
    assert orm.count(song for song in SongDb) == 1
    assert orm.count(scrobble for scrobble in ScrobbleDb) == 1
    assert result.date == when
def handle_char_create(
        pkt: char_create.ClientCharCreate,
        session: session.Session) -> List[Tuple[op_code.Server, bytes]]:
    """Handle a character-creation request.

    Rejects the request when the account-wide or per-realm character limits
    are reached or the name is already taken (case-insensitive); otherwise
    creates the new player and reports success.
    """

    def _reply(code):
        # Every outcome is a single CHAR_CREATE packet carrying a result code.
        return [(
            op_code.Server.CHAR_CREATE,
            char_create.ServerCharCreate.build(dict(error=code)),
        )]

    account = world.Account[session.account_name]
    realm = world.Realm[session.realm_name]

    # Account-wide character cap.
    if len(account.characters) >= config.MAX_CHARACTERS_PER_ACCOUNT:
        return _reply(ResponseCode.ACCOUNT_LIMIT)

    # Per-realm character cap.
    chars_on_realm = orm.count(
        p for p in world.Player
        if p.account == account and p.realm == realm)
    if chars_on_realm >= config.MAX_CHARACTERS_PER_REALM:
        return _reply(ResponseCode.SERVER_LIMIT)

    # Case-insensitive name uniqueness.
    if orm.count(
            p for p in world.Player
            if p.name.upper() == pkt.name.upper()) > 0:
        return _reply(ResponseCode.NAME_IN_USE)

    world.Player.New(
        account=account,
        realm=realm,
        name=pkt.name,
        race=constants.ChrRaces[pkt.race],
        class_=constants.ChrClasses[pkt.class_],
        gender=enums.Gender(pkt.gender),
        skin_color=pkt.skin_color,
        face=pkt.face,
        hair_style=pkt.hair_style,
        hair_color=pkt.hair_color,
        feature=pkt.feature,
    )
    return _reply(ResponseCode.SUCCESS)
def test_add_song_multiple_artists():
    """A comma-separated artist string must create one song linked to every
    listed artist plus the album artist."""
    song_logic.add(
        SongIn(
            title="title",
            length=1,
            album="album",
            album_artist="artist",
            artist="artist1, artist2",
            tags=[TagIn(tag_type="type", value="tag")],
        )
    )
    assert orm.count(song for song in SongDb) == 1
    assert orm.count(tag for tag in TagDb) == 1
    assert orm.count(artist for artist in ArtistDb) == 3
    assert orm.count(album for album in AlbumDb) == 1
def add_tracker(self, tracker_url):
    """
    Adds a new tracker into the tracker info dict and the database.
    :param tracker_url: The new tracker URL to be added.
    """
    sanitized = get_uniformed_tracker_url(tracker_url)
    if sanitized is None:
        self._logger.warning(u"skip invalid tracker: %s", repr(tracker_url))
        return

    with db_session:
        # Skip duplicates: the URL is already present in the store.
        if count(g for g in self.tracker_store if g.url == sanitized) > 0:
            self._logger.debug(u"skip existing tracker: %s", repr(tracker_url))
            return

        # insert into database
        self.tracker_store(url=sanitized,
                           last_check=0,
                           failures=0,
                           alive=True,
                           torrents={})
def calculate_active_presents(self) -> int:
    """Count the presents that have not been stolen.

    Returns:
        int: Number of non-stolen presents.
    """
    return orm.count(
        present for present in self.bot.db.Present if not present.stolen)
def get(self, topic_id):
    """Render a topic page with its paged replies; page 0 means last page."""
    topic = Topic.get(id=int(topic_id))
    if not topic:
        raise tornado.web.HTTPError(404)
    page = force_int(self.get_argument('page', 0), 0)
    category = self.get_argument('category', None) or 'all'
    if category == 'all':
        reply_count = topic.reply_count
        url = topic.url
    elif category == 'hot':
        reply_count = orm.count(
            topic.get_replies(page=None, category=category))
        url = topic.url + '?category=hot'
    page_count = (reply_count + config.reply_paged - 1) // config.reply_paged
    if page == 0:
        page = page_count
    replies = topic.get_replies(page=page, category=category)
    form = ReplyForm()
    return self.render("topic/index.html", topic=topic, replies=replies,
                       form=form, category=category, page=page,
                       page_count=page_count, url=url)
def calculate_all_stolen_presents(self) -> int:
    """Count the presents currently marked as stolen.

    Returns:
        int: Number of stolen presents.
    """
    return orm.count(
        present for present in self.bot.db.Present if present.stolen)
def album_list():
    """Subsonic 'getAlbumList': return albums (represented by their folders)
    ordered by the requested list type.

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request; ``byYear`` additionally reads ``fromYear``/``toYear`` and
    ``byGenre`` reads ``genre``. Raises GenericError on an unknown type.
    """
    ltype = request.values["type"]
    size, offset = map(request.values.get, ("size", "offset"))
    size = int(size) if size else 10
    offset = int(offset) if offset else 0

    # Albums are modelled as the folders that contain the tracks.
    query = select(t.folder for t in Track)
    if ltype == "random":
        # Random selection returns immediately — offset is not meaningful.
        return request.formatter(
            "albumList",
            {
                "album": [
                    a.as_subsonic_child(request.user)
                    for a in distinct(query.random(size))
                ]
            },
        )
    elif ltype == "newest":
        query = query.sort_by(desc(Folder.created)).distinct()
    elif ltype == "highest":
        # Highest average user rating first.
        query = query.sort_by(lambda f: desc(avg(f.ratings.rating)))
    elif ltype == "frequent":
        # Highest average play count over the folder's tracks first.
        query = query.sort_by(lambda f: desc(avg(f.tracks.play_count)))
    elif ltype == "recent":
        # Only folders that have been played at all, most recent first.
        query = select(t.folder for t in Track
                       if max(t.folder.tracks.last_play) is not None).sort_by(
                           lambda f: desc(max(f.tracks.last_play)))
    elif ltype == "starred":
        # Folders the current user starred that directly contain tracks.
        query = select(
            s.starred for s in StarredFolder
            if s.user.id == request.user.id and count(s.starred.tracks) > 0)
    elif ltype == "alphabeticalByName":
        query = query.sort_by(Folder.name).distinct()
    elif ltype == "alphabeticalByArtist":
        query = query.sort_by(lambda f: f.parent.name + f.name)
    elif ltype == "byYear":
        startyear = int(request.values["fromYear"])
        endyear = int(request.values["toYear"])
        # Bounds are accepted in either order.
        query = query.where(lambda t: between(t.year,
                                              min(startyear, endyear),
                                              max(startyear, endyear)))
        if endyear < startyear:
            # Reversed bounds mean "newest year first".
            query = query.sort_by(lambda f: desc(min(f.tracks.year)))
        else:
            query = query.sort_by(lambda f: min(f.tracks.year))
    elif ltype == "byGenre":
        genre = request.values["genre"]
        query = query.where(lambda t: t.genre == genre)
    else:
        raise GenericError("Unknown search type")

    return request.formatter(
        "albumList",
        {
            "album": [
                f.as_subsonic_child(request.user)
                for f in query.limit(size, offset)
            ]
        },
    )
def get(self):
    """Render the member list, paged, for the 'all' or 'online' category."""
    page = force_int(self.get_argument('page', 1), 1)
    category = self.get_argument('category', None)
    limit = 12
    hot_users = User.get_users(category='hot', limit=limit)
    new_users = User.get_users(category='new', limit=limit)
    page_count = 0
    users = []
    url = '/users'
    per_page = config.user_paged
    if category == 'all':
        user_count = orm.count(User.get_users(page=None))
        page_count = (user_count + per_page - 1) // per_page
        users = User.get_users(page=page)
        url = '/users?category=all'
    elif category == 'online':
        member_ids = [int(i) for i in User.get_online_members()]
        user_count = len(member_ids)
        member_ids = member_ids[(page - 1) * per_page:page * per_page]
        users = User.select(lambda rv: rv.id in member_ids)
        page_count = (user_count + per_page - 1) // per_page
        url = '/users?category=online'
    return self.render("user/show.html", users=users, hot_users=hot_users,
                       new_users=new_users, page=page, page_count=page_count,
                       url=url, category=category)
def calculate_all_time_presents(self) -> int:
    """Count every present ever distributed.

    Returns:
        int: All time number of presents distributed.
    """
    return orm.count(present for present in self.bot.db.Present)
def getTimeRange(sensor, srcregistry, dstregistry, srcCol, dstCol,
                 fromDate=None, toDate=None):
    """Resolve the (fromDate, toDate) window still to be transferred for
    `sensor` from `srcregistry` to `dstregistry`.

    Fallback chain: fromDate defaults to the newest destination time, then
    to the oldest source time; toDate defaults to the newest source time.
    Either may remain None, in which case a warning is logged and the
    caller receives the None value unchanged.
    """
    if not fromDate:
        # Resume after the newest record already present in the destination.
        fromDate = getNewestTime(sensor, dstregistry, dstCol)
    if not fromDate:
        # Destination empty: start from the oldest available source record.
        fromDate = getOldestTime(sensor, srcregistry, srcCol)
    if not toDate:
        toDate = getNewestTime(sensor, srcregistry, srcCol)
    if not fromDate:
        # Still unresolved: report why (empty registry vs. no usable rows).
        if not srcregistry.select().count():
            logger.warning("{} has no registries".format(srcregistry))
        else:
            logger.warning(
                "fromDate is {} for sensor {} because {} of {} has {} non-null registries".format(
                    fromDate, sensor.to_dict(), srcregistry, srcCol,
                    orm.count(r for r in srcregistry
                              if r.sensor == sensor and getattr(r, srcCol))
                )
            )
        # TODO raise?
    if not toDate:
        logger.warning("toDate is {} for sensor {}".format(toDate, sensor))
    return fromDate, toDate
def most_played_songs(
    page: int = 0,
    page_size: int = 10,
    min_date: datetime = None,
    max_date: datetime = None,
) -> List[Dict]:
    """Get the most played songs of a user in a given time frame

    ## Arguments:
    - `page`: `int`, optional:
        - Defaults to `0`.
    - `page_size`: `int`, optional:
        - The size of the pages you want to select. Defaults to `10`.
    - `min_date`: `datetime`, optional:
        - The start of data to aggregate. `None` gets data from the
          beginning. Defaults to `None`
    - `max_date`: `datetime`, optional:
        - The end of data to aggregate `None` gets data until the end.
          Defaults to `None`

    ## Returns:
    - `List[Dict]`:
        - The list of scrobbles ordered by descending plays. The dict has
          two keys: `song` of type `SongDb` and `plays` of type `int`
    """
    # Group scrobbles by song and count the plays per song.
    query = orm.select((scrobble.song, orm.count(scrobble))
                       for scrobble in ScrobbleDb)
    # Most-played first.
    query = query.order_by(lambda song, count: orm.desc(count))
    if min_date is not None:
        query = query.where(lambda scrobble: scrobble.date >= min_date)
    if max_date is not None:
        query = query.where(lambda scrobble: scrobble.date <= max_date)
    # NOTE(review): Pony's Query.page() numbers pages starting at 1; the
    # default page=0 looks suspicious — confirm the intended first-page
    # value against the callers.
    songs = list(query.page(page, page_size))
    return [{"song": song, "plays": plays} for song, plays in songs]
def get(self):
    """Show the member list; supports 'all' (paged) and 'online' categories."""
    page = force_int(self.get_argument('page', 1), 1)
    category = self.get_argument('category', None)
    limit = 12
    hot_users = User.get_users(category='hot', limit=limit)
    new_users = User.get_users(category='new', limit=limit)
    users, page_count, url = [], 0, '/users'
    per_page = config.user_paged
    if category == 'all':
        total = orm.count(User.get_users(page=None))
        page_count = (total + per_page - 1) // per_page
        users = User.get_users(page=page)
        url = '/users?category=all'
    elif category == 'online':
        ids = [int(i) for i in User.get_online_members()]
        total = len(ids)
        ids = ids[(page - 1) * per_page: page * per_page]
        users = User.select(lambda rv: rv.id in ids)
        page_count = (total + per_page - 1) // per_page
        url = '/users?category=online'
    return self.render("user/show.html", users=users, hot_users=hot_users,
                       new_users=new_users, page=page, page_count=page_count,
                       url=url, category=category)
def show_type(type, page):
    """
    Displays list.html with the lasts articles of a given type.
    Returns an error if the page is empty or if type doesn't exists.
    """
    if type not in types:
        return render_template('components/erreur.html',
                               erreur="Le type souhaité est invalide !")
    type_data = types[type]
    offset = page * 10
    recent = Articles.select(lambda a: a.type == type).order_by(
        desc(Articles.timestamp))[offset:offset + 10]
    articles = [fill_informations(item) for item in recent]
    total = count(a for a in Articles if a.type == type)
    return render_template('blog/list.html', template="type", type=type,
                           icon=type_data['icon'], name=type_data['name'],
                           articles=articles, total_articles=total,
                           page=page)
def ta_posts(args):
    """Show number of posts for each TA"""
    tas = User.select(lambda u: u.user_type == 'ta').order_by(
        lambda u: desc(count(u.posts)))
    for ta in tas:
        post_total = len(ta.posts)
        print(f"{post_total:<4} {ta.name}")
def student_posts(args):
    """Show number of posts for each student"""
    students = User.select(lambda u: u.user_type == 'student').order_by(
        lambda u: desc(count(u.posts)))
    for student in students:
        post_total = len(student.posts)
        print(f"{post_total:<4} {student.name}")
def calculate_please(self) -> int:
    """Count presents flagged as 'please' (given to a user by themselves).

    Returns:
        int: Number of self-gifted presents.
    """
    return orm.count(
        present for present in self.bot.db.Present if present.please)
def as_subsonic_album(self, user):  # "AlbumID3" type in XSD
    """Serialize this album to a Subsonic "AlbumID3" dict for `user`.

    Includes cover art when any track's folder (or the track itself)
    carries one, the earliest track year when any track has one, the
    joined distinct genres, and the user's star date when present.
    """
    info = {
        "id": str(self.id),
        "name": self.name,
        "artist": self.artist.name,
        "artistId": str(self.artist.id),
        "songCount": self.tracks.count(),
        "duration": sum(self.tracks.duration),
        "created": min(self.tracks.created).isoformat(),
    }
    # Prefer folder-level cover art; fall back to art embedded in a track.
    track_with_cover = self.tracks.select(
        lambda t: t.folder.cover_art is not None).first()
    if track_with_cover is not None:
        info["coverArt"] = str(track_with_cover.folder.id)
    else:
        track_with_cover = self.tracks.select(lambda t: t.has_art).first()
        if track_with_cover is not None:
            info["coverArt"] = str(track_with_cover.id)
    # Only emit a year when at least one track carries one.
    if count(self.tracks.year) > 0:
        info["year"] = min(self.tracks.year)
    genre = ", ".join(self.tracks.genre.distinct())
    if genre:
        info["genre"] = genre
    try:
        starred = StarredAlbum[user.id, self.id]
        info["starred"] = starred.date.isoformat()
    except ObjectNotFound:
        pass
    return info
def get_folder(self, cat_id, folderName):
    """Return the count of Fanfic `Folder` values matching the given
    category id and folder name."""
    with db_session:
        return count(fic.Folder for fic in Fanfic
                     if fic.Category_Id == cat_id
                     and fic.Folder == folderName)
def get_cat_count(self, categoryName):
    """Return how many Category rows (with positive id) match `categoryName`."""
    with db_session:
        return count(cat for cat in Category
                     if cat.Name == categoryName and cat.Id > 0)
async def get_results(self, request):
    """Return the voting results as a JSON list of (option name, vote count).

    Uses a left join so options with zero votes are still included.
    """
    with db_session:
        rows = left_join(
            (option.name, count(vote))
            for option in OptionModel for vote in option.vote
        )[:]
        return web.json_response(rows)
def get(self, urlname, view='index', category='all'):
    """Render a user's profile page for the requested sub-view
    (topics/replies/followings/followers/albums), with paging; also
    handles the AJAX 'follow' action."""
    page = force_int(self.get_argument('page', 1), 1)
    user = User.get(urlname=urlname)
    if not user:
        raise tornado.web.HTTPError(404)
    action = self.get_argument('action', None)
    if action and self.current_user:
        if action == 'follow' and user != self.current_user:
            result = self.current_user.follow(whom_id=user.id)
            return self.send_result(result)
    # Map each sub-view to a callable producing one page of its items;
    # calling it with None yields the full query for counting.
    sources = {
        'topics': lambda p: user.get_topics(page=p, category=category),
        'replies': lambda p: user.get_replies(page=p, category=category),
        'followings': lambda p: user.get_followings(page=p),
        'followers': lambda p: user.get_followers(page=p),
        'albums': lambda p: user.get_albums(page=p),
    }
    items = []
    item_count = 0
    url = user.url
    fetch = sources.get(view)
    if fetch is not None:
        items = fetch(page)
        item_count = orm.count(fetch(None))
        url += '/' + view
    page_count = (item_count + config.paged - 1) // config.paged
    return self.render("user/index.html", user=user, items=items, view=view,
                       category=category, page=page, page_count=page_count,
                       url=url)
def get(self):
    """Render the node list; only category 'all' shows a paged node list."""
    page = force_int(self.get_argument('page', 1), 1)
    category = self.get_argument('category', None)
    hot_nodes = Node.get_nodes(category='hot', limit=8)
    new_nodes = Node.get_nodes(category='new', limit=8)
    page_count = 0
    nodes = []
    url = '/nodes'
    if category == 'all':
        nodes = Node.get_nodes(category='all', page=page)
        total = orm.count(Node.get_nodes(page=None))
        page_count = (total + config.node_paged - 1) // config.node_paged
        url = '/nodes?category=' + category
    return self.render("node/show.html", hot_nodes=hot_nodes,
                       new_nodes=new_nodes, nodes=nodes, category=category,
                       page=page, page_count=page_count, url=url)
def get(self):
    """Render the site index: topics filtered by category
    ('hot' / 'timeline' / 'latest' / 'desert' / default last-reply order),
    paged with config.paged items per page."""
    page = force_int(self.get_argument('page', 1), 1)
    category = self.get_argument('category', None)
    view = self.get_argument('view', 'all')
    user = self.current_user
    if not category:
        category = self.index_category
    else:
        self.set_index_category(category)
    # Timeline needs a logged-in user; fall back to the plain index.
    if category == 'timeline' and not user:
        category = self.set_index_category('index')
    if category == 'hot':
        # The hot list is cached in memcache for 2 hours.
        topics = mc.get('hot_topics')
        if not topics:
            now = int(time.time())
            ago = now - 60 * 60 * 24  # only topics from the last 24 hours
            # Weighted score: collects/thanks minus reports (x10),
            # net votes (x5), replies (x3).
            topics = orm.select(rv for rv in Topic
                                if rv.created_at > ago).order_by(
                lambda: orm.desc((rv.collect_count + rv.thank_count -
                                  rv.report_count) * 10 +
                                 (rv.up_count - rv.down_count) * 5 +
                                 rv.reply_count * 3))
            mc.set('hot_topics', list(topics), 60 * 60 * 2)
    elif category == 'timeline':
        topics = user.get_followed_topics(page=None, category=view)
    elif category == 'latest':
        topics = orm.select(rv for rv in Topic).order_by(
            lambda: orm.desc(rv.created_at))
    elif category == 'desert':
        # "Desert": topics nobody has replied to yet.
        topics = orm.select(rv for rv in Topic
                            if rv.reply_count == 0).order_by(
            lambda: orm.desc(rv.created_at))
    else:
        topics = orm.select(rv for rv in Topic).order_by(
            lambda: orm.desc(rv.last_reply_date))
    # Cached results are plain lists; live queries are counted in SQL.
    if isinstance(topics, list):
        topic_count = len(topics)
    else:
        topic_count = orm.count(topics)
    topics = topics[(page - 1) * config.paged: page * config.paged]
    page_count = (topic_count + config.paged - 1) // config.paged
    return self.render("site/index.html", topics=topics, category=category,
                       page=page, view=view, page_count=page_count, url='/')
def add_tracker(self, tracker_url):
    """
    Adds a new tracker into the tracker info dict and the database.
    :param tracker_url: The new tracker URL to be added.
    """
    sanitized_tracker_url = get_uniformed_tracker_url(tracker_url)
    if sanitized_tracker_url is None:
        # Bug fix: Logger.warn() is a deprecated alias (removed in
        # Python 3.13); use Logger.warning() instead — consistent with
        # the other add_tracker variant in this codebase.
        self._logger.warning(u"skip invalid tracker: %s", repr(tracker_url))
        return

    with db_session:
        # Skip duplicates: the URL is already present in the store.
        num = count(g for g in self.tracker_store
                    if g.url == sanitized_tracker_url)
        if num > 0:
            self._logger.debug(u"skip existing tracker: %s", repr(tracker_url))
            return

        # insert into database
        self.tracker_store(url=sanitized_tracker_url,
                           last_check=0,
                           failures=0,
                           alive=True,
                           torrents={})
def get(self):
    """Node overview page; the 'all' category shows a paged node list."""
    page = force_int(self.get_argument("page", 1), 1)
    nodes, page_count = [], 0
    category = self.get_argument("category", None)
    hot_nodes = Node.get_nodes(category="hot", limit=8)
    new_nodes = Node.get_nodes(category="new", limit=8)
    url = "/nodes"
    if category == "all":
        nodes = Node.get_nodes(category="all", page=page)
        total = orm.count(Node.get_nodes(page=None))
        page_count = (total + config.node_paged - 1) // config.node_paged
        url = "/nodes?category=" + category
    return self.render(
        "node/show.html",
        hot_nodes=hot_nodes,
        new_nodes=new_nodes,
        nodes=nodes,
        category=category,
        page=page,
        page_count=page_count,
        url=url,
    )
def charge_documents(self):
    """
    This method allow you to charge documents you already have in your
    database. In this way an Index would be created according to the
    model and fields registered.
    """
    doc_count = self._whoosh.doc_count()
    objs = orm.count(e for e in self._model)
    # Add any registered fields that are missing from the whoosh schema.
    field_names = set(self._schema_attrs.keys())
    missings = set(self._whoosh.schema.names())
    for f in list(field_names - missings):
        # NOTE(review): fields.TEXT(self.kw) passes the kwargs dict as a
        # single positional argument — confirm whether
        # fields.TEXT(**self.kw) was intended.
        self.add_field(f, fields.TEXT(self.kw))
    # Only (re)index when the whoosh index is empty but the table is not.
    if doc_count == 0 and objs > 0:
        writer = self._whoosh.writer()
        for obj in orm.select(e for e in self._model):
            attrs = {self._primary_key: obj.get_pk()}
            for f in self._schema_attrs.keys():
                # NOTE(review): `unicode` exists only on Python 2 — this
                # raises NameError on Python 3; confirm the target runtime.
                attrs[f] = unicode(getattr(obj, f))
            writer.add_document(**attrs)
        writer.commit()
def get(self, topic_id):
    """Show a topic with its reply list; page=0 jumps to the last page."""
    topic = Topic.get(id=int(topic_id))
    if not topic:
        raise tornado.web.HTTPError(404)
    page = force_int(self.get_argument('page', 0), 0)
    category = self.get_argument('category', None)
    if not category:
        category = 'all'
    if category == 'hot':
        reply_count = orm.count(
            topic.get_replies(page=None, category=category))
        url = topic.url + '?category=hot'
    elif category == 'all':
        reply_count = topic.reply_count
        url = topic.url
    per_page = config.reply_paged
    page_count = (reply_count + per_page - 1) // per_page
    if page == 0:
        page = page_count
    return self.render("topic/index.html", topic=topic,
                       replies=topic.get_replies(page=page,
                                                 category=category),
                       form=ReplyForm(), category=category, page=page,
                       page_count=page_count, url=url)
def history_count(self):
    """Return the number of entries in this object's ``histories``
    collection, computed with Pony's ``orm.count``."""
    return orm.count(self.histories)
def album_list():
    """Subsonic 'getAlbumList' (older variant): return albums (represented
    by their folders) ordered by the requested list type.

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from
    the request values. Raises GenericError on an unknown type.
    """
    ltype = request.values['type']
    size, offset = map(request.values.get, [ 'size', 'offset' ])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0

    # Albums are modelled as the folders that contain the tracks.
    query = select(t.folder for t in Track)
    if ltype == 'random':
        # Random selection returns immediately — offset is not meaningful.
        return request.formatter('albumList', dict(
            album = [ a.as_subsonic_child(request.user)
                      for a in query.without_distinct().random(size) ]
        ))
    elif ltype == 'newest':
        query = query.order_by(desc(Folder.created))
    elif ltype == 'highest':
        # Highest average user rating first.
        query = query.order_by(lambda f: desc(avg(f.ratings.rating)))
    elif ltype == 'frequent':
        # Highest average play count over the folder's tracks first.
        query = query.order_by(lambda f: desc(avg(f.tracks.play_count)))
    elif ltype == 'recent':
        # Only folders that have been played at all, most recent first.
        query = select(t.folder for t in Track
                       if max(t.folder.tracks.last_play) is not None
                       ).order_by(lambda f: desc(max(f.tracks.last_play)))
    elif ltype == 'starred':
        # Folders the current user starred that directly contain tracks.
        query = select(s.starred for s in StarredFolder
                       if s.user.id == request.user.id
                       and count(s.starred.tracks) > 0)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Folder.name)
    elif ltype == 'alphabeticalByArtist':
        query = query.order_by(lambda f: f.parent.name + f.name)
    else:
        raise GenericError('Unknown search type')

    return request.formatter('albumList', dict(
        album = [ f.as_subsonic_child(request.user)
                  for f in query.limit(size, offset) ]
    ))
def get_starred():
    """Subsonic 'getStarred': the user's starred folders split into artists
    (no direct tracks) and albums (with tracks), plus starred songs."""
    folders = select(
        s.starred for s in StarredFolder if s.user.id == request.user.id)
    artist_folders = folders.filter(lambda f: count(f.tracks) == 0)
    album_folders = folders.filter(lambda f: count(f.tracks) > 0)
    starred_tracks = select(
        s.starred for s in StarredTrack if s.user.id == request.user.id)
    return request.formatter('starred', dict(
        artist=[dict(id=str(sf.id), name=sf.name) for sf in artist_folders],
        album=[sf.as_subsonic_child(request.user) for sf in album_folders],
        song=[st.as_subsonic_child(request.user, request.client)
              for st in starred_tracks],
    ))
def get(self, urlname, category='all'):
    """Render a node page with its topics (paged), answer AJAX 'tag'
    queries for the node description / relationship graph, or handle a
    'follow' action for the current user."""
    node = Node.get(urlname=urlname)
    if not node:
        raise tornado.web.HTTPError(404)
    page = force_int(self.get_argument('page', 1), 1)
    action = self.get_argument('action', None)
    tag = self.get_argument('tag', None)
    if tag:
        if tag == 'description':
            # Lightweight AJAX payload with the node's description/counters.
            result = {
                'status': 'success',
                'message': '简介传输成功',
                'node_description': node.description,
                'node_topic_count': node.topic_count,
                'node_follow_count': node.follow_count,
            }
            return self.write(result)
        if tag == 'relationship':
            # Serialize parent/child/sibling nodes for the relationship view.
            parent_nodes = node.parent_nodes
            child_nodes = node.child_nodes
            sibling_nodes = node.sibling_nodes
            parent_json = []
            children_json = []
            sibling_json = []
            for p in parent_nodes:
                parent_json.append(dict(id=p.id, name=p.name, url=p.url,
                                        description=p.description,
                                        summary=p.summary,
                                        urlname=p.urlname, icon=p.icon))
            for c in child_nodes:
                children_json.append(dict(id=c.id, name=c.name, url=c.url,
                                          description=c.description,
                                          summary=c.summary,
                                          urlname=c.urlname, icon=c.icon))
            for s in sibling_nodes:
                sibling_json.append(dict(id=s.id, name=s.name, url=s.url,
                                         description=s.description,
                                         summary=s.summary,
                                         urlname=s.urlname, icon=s.icon))
            result = {
                'status': 'success',
                'parent_nodes': parent_json,
                'child_nodes': children_json,
                'sibling_nodes': sibling_json
            }
            return self.write(result)
    user = self.current_user
    if action and user:
        if action == 'follow':
            result = user.follow(node_id=node.id)
            # AJAX callers get JSON; regular requests get a flash+redirect.
            if self.is_ajax:
                return self.write(result)
            self.flash_message(**result)
            return self.redirect_next_url()
    topic_count = orm.count(node.get_topics(page=None, category=category))
    page_count = (topic_count + config.reply_paged - 1) // config.reply_paged
    url = node.url + '?category=' + category
    topics = node.get_topics(page=page, category=category)
    return self.render("node/index.html", node=node, topics=topics,
                       category=category, page=page, page_count=page_count,
                       url=url)
def is_registered(self, email):
    """Return True when a User row with this email already exists."""
    matches = count(u for u in User if u.email == email)
    return matches > 0
def get(self, urlname, category="all"):
    """Render a node's topic index, or answer AJAX sub-requests.

    ``tag=description`` returns the node's description/counters as JSON;
    ``tag=relationship`` returns parent/child/sibling nodes as JSON;
    ``action=follow`` makes the current user follow the node.
    Otherwise renders the paginated topic list for *category*.
    """
    node = Node.get(urlname=urlname)
    if not node:
        raise tornado.web.HTTPError(404)
    page = force_int(self.get_argument("page", 1), 1)
    action = self.get_argument("action", None)
    tag = self.get_argument("tag", None)
    if tag:
        if tag == "description":
            result = {
                "status": "success",
                "message": "简介传输成功",
                "node_description": node.description,
                "node_topic_count": node.topic_count,
                "node_follow_count": node.follow_count,
            }
            return self.write(result)
        if tag == "relationship":
            parent_nodes = node.parent_nodes
            child_nodes = node.child_nodes
            sibling_nodes = node.sibling_nodes
            parent_json = []
            children_json = []
            sibling_json = []
            # Serialize each related node down to the fields the client needs.
            for p in parent_nodes:
                parent_json.append(
                    dict(
                        id=p.id,
                        name=p.name,
                        url=p.url,
                        description=p.description,
                        summary=p.summary,
                        urlname=p.urlname,
                        icon=p.icon,
                    )
                )
            for c in child_nodes:
                children_json.append(
                    dict(
                        id=c.id,
                        name=c.name,
                        url=c.url,
                        description=c.description,
                        summary=c.summary,
                        urlname=c.urlname,
                        icon=c.icon,
                    )
                )
            for s in sibling_nodes:
                sibling_json.append(
                    dict(
                        id=s.id,
                        name=s.name,
                        url=s.url,
                        description=s.description,
                        summary=s.summary,
                        urlname=s.urlname,
                        icon=s.icon,
                    )
                )
            result = {
                "status": "success",
                "parent_nodes": parent_json,
                "child_nodes": children_json,
                "sibling_nodes": sibling_json,
            }
            return self.write(result)
    user = self.current_user
    if action and user:
        if action == "follow":
            result = user.follow(node_id=node.id)
            if self.is_ajax:
                return self.write(result)
            self.flash_message(**result)
            return self.redirect_next_url()
    topic_count = orm.count(node.get_topics(page=None, category=category))
    # Ceiling division: number of pages at config.reply_paged topics per page.
    page_count = (topic_count + config.reply_paged - 1) // config.reply_paged
    url = node.url + "?category=" + category
    topics = node.get_topics(page=page, category=category)
    return self.render(
        "node/index.html", node=node, topics=topics, category=category,
        page=page, page_count=page_count, url=url
    )
def contains_course(self, code):
    """Return True when a Site row with this course code already exists."""
    matches = count(c for c in Site if c.course_code == code)
    return matches > 0
def get_numer_of_similarities(self):
    """Respond with the total number of Similarity rows."""
    total = count(row for row in Similarity)
    self.success(total)
def get_list(self):
    """Return every tag as a list of ``{"name": ..., "count": ...}`` dicts.

    ``count`` is the number of article metas linked to the tag.
    """
    with db_session:
        # Dict comprehension instead of a lambda bound to a name (PEP 8 E731);
        # no intermediate variable needed.
        return [
            {"name": tag.name, "count": count(tag.article_metas)}
            for tag in model.Tag.select()
        ]
def count(self, schema):
    """Return the number of rows in *schema* (delegates to ``pn.count``)."""
    return pn.count(row for row in schema)
def contents_len(self):
    """Return the number of entries in ``self.contents``.

    Delegates to ``orm.count`` — presumably Pony ORM, so the count is
    computed by the database rather than by loading the collection.
    """
    return orm.count(self.contents)
def get_numer_of_thesis(self):
    """Respond with the total number of Thesis rows."""
    total = count(row for row in Thesis)
    self.success(total)
def get_random_image():
    """Return one random row from the Image table.

    Picks a random *existing* id instead of assuming ids form a contiguous
    1..N range: the original ``randint(1, count)`` draw could hit the gap
    left by a deleted row, making ``results[0]`` raise IndexError.

    Raises:
        ValueError: if the Image table is empty (same exception type the
            old ``randint(1, 0)`` call raised).
    """
    image_ids = [i.id for i in Image.select()]
    if not image_ids:
        raise ValueError("no images available")
    image_id = image_ids[randint(0, len(image_ids) - 1)]
    results = db.select("* from Image where id = $image_id", {"image_id": image_id})
    return results[0]
def get_numer_of_users(self):
    """Respond with the total number of User rows."""
    total = count(row for row in User)
    self.success(total)
def unread_abuse_reports_count():
    """Count the distinct targets that still have pending abuse reports.

    A report is pending when it is neither ignored nor resolved; grouping
    by (target_type, target_id) collapses multiple reports on one target.
    """
    pending_targets = select(
        (report.target_type, report.target_id, count(report.id))
        for report in AbuseReport
        if report.resolved_at is None and not report.ignored
    )
    return pending_targets.count()
def convert_discovered_torrents(self): offset = 0 # Reflect conversion state with db_session: v = self.mds.MiscData.get_for_update(name=CONVERSION_FROM_72_DISCOVERED) if v: offset = orm.count( g for g in self.mds.TorrentMetadata if g.status == LEGACY_ENTRY and g.metadata_type == REGULAR_TORRENT) v.set(value=CONVERSION_STARTED) else: self.mds.MiscData(name=CONVERSION_FROM_72_DISCOVERED, value=CONVERSION_STARTED) start_time = datetime.datetime.utcnow() batch_size = 100 total_to_convert = self.get_old_torrents_count() reference_timedelta = datetime.timedelta(milliseconds=100) start = 0 + offset end = start while start < total_to_convert: batch = self.get_old_torrents(batch_size=batch_size, offset=start) if not batch: break end = start + len(batch) batch_start_time = datetime.datetime.now() try: with db_session: for (t, _) in batch: if self.shutting_down: return try: self.mds.TorrentMetadata(**t) except (TransactionIntegrityError, CacheIndexError): pass except (TransactionIntegrityError, CacheIndexError): pass batch_end_time = datetime.datetime.now() - batch_start_time # It is not necessary to put 'sleep' here, because get_old_torrents effectively plays that role target_coeff = (batch_end_time.total_seconds() / reference_timedelta.total_seconds()) if len(batch) == batch_size: # Adjust batch size only for full batches if target_coeff < 0.8: batch_size += batch_size elif target_coeff > 1.0: batch_size = int(float(batch_size) / target_coeff) batch_size += 1 # we want to guarantee that at least something will go through self._logger.info("Converted old torrents batch: %i/%i %f ", start + batch_size, total_to_convert, float(batch_end_time.total_seconds())) if self.notifier_callback: self.notifier_callback("%i/%i" % (start + batch_size, total_to_convert)) start = end with db_session: v = self.mds.MiscData.get_for_update(name=CONVERSION_FROM_72_DISCOVERED) v.value = CONVERSION_FINISHED stop_time = datetime.datetime.utcnow() elapsed = (stop_time - start_time).total_seconds() 
if self.notifier_callback: self.notifier_callback( "%i entries converted in %i seconds (%i e/s)" % ( end - offset, int(elapsed), int((end - offset) / elapsed)))
def n_items(self):
    """Return the number of entries in ``self.items``."""
    return orm.count(entry for entry in self.items)