def album_list():
    """Handle the Subsonic-style ``getAlbumList`` endpoint (folder-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request, builds a Pony ORM query over track folders according to ``type``,
    and returns the formatted ``albumList`` response.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values["type"]
    size, offset = map(request.values.get, ("size", "offset"))
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    # Base query: every folder that contains at least one track.
    query = select(t.folder for t in Track)
    if ltype == "random":
        # Random selection returns immediately; offset is not applicable.
        return request.formatter(
            "albumList",
            {
                "album": [
                    a.as_subsonic_child(request.user)
                    for a in distinct(query.random(size))
                ]
            },
        )
    elif ltype == "newest":
        query = query.sort_by(desc(Folder.created)).distinct()
    elif ltype == "highest":
        query = query.sort_by(lambda f: desc(avg(f.ratings.rating)))
    elif ltype == "frequent":
        query = query.sort_by(lambda f: desc(avg(f.tracks.play_count)))
    elif ltype == "recent":
        # Only folders whose tracks have been played at least once.
        query = select(
            t.folder for t in Track if max(t.folder.tracks.last_play) is not None
        ).sort_by(lambda f: desc(max(f.tracks.last_play)))
    elif ltype == "starred":
        query = select(
            s.starred for s in StarredFolder
            if s.user.id == request.user.id and count(s.starred.tracks) > 0)
    elif ltype == "alphabeticalByName":
        query = query.sort_by(Folder.name).distinct()
    elif ltype == "alphabeticalByArtist":
        query = query.sort_by(lambda f: f.parent.name + f.name)
    elif ltype == "byYear":
        startyear = int(request.values["fromYear"])
        endyear = int(request.values["toYear"])
        # NOTE(review): the where-lambda parameter here is the selected folder,
        # yet it filters on t.year — presumably Folder has a year attribute;
        # verify, since the sort below aggregates f.tracks.year instead.
        query = query.where(lambda t: between(
            t.year, min(startyear, endyear), max(startyear, endyear)))
        if endyear < startyear:
            # Reversed bounds request a descending year ordering.
            query = query.sort_by(lambda f: desc(min(f.tracks.year)))
        else:
            query = query.sort_by(lambda f: min(f.tracks.year))
    elif ltype == "byGenre":
        genre = request.values["genre"]
        query = query.where(lambda t: t.genre == genre)
    else:
        raise GenericError("Unknown search type")
    return request.formatter(
        "albumList",
        {
            "album": [
                f.as_subsonic_child(request.user)
                for f in query.limit(size, offset)
            ]
        },
    )
def album_list_id3():
    """Handle the Subsonic-style ``getAlbumList2`` endpoint (ID3/tag-based).

    Same parameter handling as the folder-based variant (``type``, ``size``
    default 10, ``offset`` default 0) but queries Album entities directly.
    Raises GenericError for an unknown ``type``.
    """
    ltype = request.values["type"]
    size, offset = map(request.values.get, ("size", "offset"))
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    query = Album.select()
    if ltype == "random":
        # Random selection returns immediately; offset is not applicable.
        return request.formatter(
            "albumList2",
            {
                "album": [
                    a.as_subsonic_album(request.user)
                    for a in query.random(size)
                ]
            },
        )
    elif ltype == "newest":
        query = query.order_by(lambda a: desc(min(a.tracks.created)))
    elif ltype == "frequent":
        query = query.order_by(lambda a: desc(avg(a.tracks.play_count)))
    elif ltype == "recent":
        # Only albums with at least one played track.
        query = Album.select(
            lambda a: max(a.tracks.last_play) is not None).order_by(
                lambda a: desc(max(a.tracks.last_play)))
    elif ltype == "starred":
        query = select(s.starred for s in StarredAlbum
                       if s.user.id == request.user.id)
    elif ltype == "alphabeticalByName":
        query = query.order_by(Album.name)
    elif ltype == "alphabeticalByArtist":
        query = query.order_by(lambda a: a.artist.name + a.name)
    elif ltype == "byYear":
        startyear = int(request.values["fromYear"])
        endyear = int(request.values["toYear"])
        # Filter on the album's earliest track year within the requested range.
        query = query.where(lambda a: between(min(
            a.tracks.year), min(startyear, endyear), max(startyear, endyear)))
        if endyear < startyear:
            # Reversed bounds request a descending year ordering.
            query = query.order_by(lambda a: desc(min(a.tracks.year)))
        else:
            query = query.order_by(lambda a: min(a.tracks.year))
    elif ltype == "byGenre":
        genre = request.values["genre"]
        query = query.where(lambda a: genre in a.tracks.genre)
    else:
        raise GenericError("Unknown search type")
    return request.formatter(
        "albumList2",
        {
            "album": [
                f.as_subsonic_album(request.user)
                for f in query.limit(size, offset)
            ]
        },
    )
def checknewscomments(news):
    """Run comment consistency checks for news items.

    If *news* is empty, walks every NewsItem by ascending id; otherwise
    each entry in *news* is looked up by numeric id or by name.  Each item
    is processed in its own db_session so failures don't hold one giant
    transaction open.
    """
    orm.sql_debug(False)
    if not news:
        with orm.db_session:
            first_newsitem = orm.select(orm.min(x.id) for x in NewsItem).first()
            last_newsitem = orm.select(orm.max(x.id) for x in NewsItem).first()
        newsitem_id = first_newsitem
    else:
        i = 0
    while True:
        with orm.db_session:
            if not news:
                # Sequential scan mode: fetch the next existing item by id.
                newsitem = NewsItem.select(lambda x: x.id >= newsitem_id and x.id <= last_newsitem).first()
                if not newsitem:
                    break
            else:
                if i >= len(news):
                    break
                if news[i].isdigit():
                    newsitem = NewsItem.get(id=int(news[i]))
                else:
                    newsitem = NewsItem.get(name=news[i])
                i += 1
                if not newsitem:
                    print('News item {} not found'.format(news[i - 1]))
                    continue
            print('News item {} ({})'.format(newsitem.id, newsitem.name))
            comments_list = newsitem.bl.select_comments().order_by('c.date, c.id')
            check_comments_for(newsitem, comments_list)
            # Advance the scan cursor past the item just processed.
            newsitem_id = newsitem.id + 1
def album_list_id3():
    """Handle the Subsonic-style ``getAlbumList2`` endpoint (ID3/tag-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request and queries Album entities accordingly.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values['type']
    size, offset = map(request.values.get, [ 'size', 'offset' ])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    query = Album.select()
    if ltype == 'random':
        # Random selection returns immediately; offset is not applicable.
        return request.formatter('albumList2', dict(
            album = [ a.as_subsonic_album(request.user) for a in query.random(size) ]
        ))
    elif ltype == 'newest':
        query = query.order_by(lambda a: desc(min(a.tracks.created)))
    elif ltype == 'frequent':
        query = query.order_by(lambda a: desc(avg(a.tracks.play_count)))
    elif ltype == 'recent':
        # Only albums with at least one played track.
        query = Album.select(lambda a: max(a.tracks.last_play) is not None).order_by(lambda a: desc(max(a.tracks.last_play)))
    elif ltype == 'starred':
        query = select(s.starred for s in StarredAlbum if s.user.id == request.user.id)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Album.name)
    elif ltype == 'alphabeticalByArtist':
        query = query.order_by(lambda a: a.artist.name + a.name)
    else:
        raise GenericError('Unknown search type')
    return request.formatter('albumList2', dict(
        album = [ f.as_subsonic_album(request.user) for f in query.limit(size, offset) ]
    ))
def album_list():
    """Handle the Subsonic-style ``getAlbumList`` endpoint (folder-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request, builds a Pony ORM query over track folders according to ``type``,
    and returns the formatted ``albumList`` response.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values['type']
    size, offset = map(request.values.get, [ 'size', 'offset' ])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    # Base query: every folder that contains at least one track.
    query = select(t.folder for t in Track)
    if ltype == 'random':
        # Random selection returns immediately; offset is not applicable.
        return request.formatter('albumList', dict(
            album = [ a.as_subsonic_child(request.user) for a in query.without_distinct().random(size) ]
        ))
    elif ltype == 'newest':
        query = query.order_by(desc(Folder.created))
    elif ltype == 'highest':
        query = query.order_by(lambda f: desc(avg(f.ratings.rating)))
    elif ltype == 'frequent':
        query = query.order_by(lambda f: desc(avg(f.tracks.play_count)))
    elif ltype == 'recent':
        # Only folders whose tracks have been played at least once.
        query = select(t.folder for t in Track if max(t.folder.tracks.last_play) is not None).order_by(lambda f: desc(max(f.tracks.last_play)))
    elif ltype == 'starred':
        query = select(s.starred for s in StarredFolder if s.user.id == request.user.id and count(s.starred.tracks) > 0)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Folder.name)
    elif ltype == 'alphabeticalByArtist':
        query = query.order_by(lambda f: f.parent.name + f.name)
    else:
        raise GenericError('Unknown search type')
    return request.formatter('albumList', dict(
        album = [ f.as_subsonic_child(request.user) for f in query.limit(size, offset) ]
    ))
def album_list():
    """Handle the Subsonic-style ``getAlbumList`` endpoint (folder-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request, builds a Pony ORM query over track folders according to ``type``,
    and returns the formatted ``albumList`` response.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values["type"]
    size, offset = map(request.values.get, ["size", "offset"])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    # Base query: every folder that contains at least one track.
    query = select(t.folder for t in Track)
    if ltype == "random":
        # Random selection returns immediately; offset is not applicable.
        return request.formatter(
            "albumList",
            dict(
                album=[
                    a.as_subsonic_child(request.user)
                    for a in query.without_distinct().random(size)
                ]
            ),
        )
    elif ltype == "newest":
        query = query.order_by(desc(Folder.created)).distinct()
    elif ltype == "highest":
        query = query.order_by(lambda f: desc(avg(f.ratings.rating)))
    elif ltype == "frequent":
        query = query.order_by(lambda f: desc(avg(f.tracks.play_count)))
    elif ltype == "recent":
        # Only folders whose tracks have been played at least once.
        query = select(
            t.folder for t in Track if max(t.folder.tracks.last_play) is not None
        ).order_by(lambda f: desc(max(f.tracks.last_play)))
    elif ltype == "starred":
        query = select(
            s.starred
            for s in StarredFolder
            if s.user.id == request.user.id and count(s.starred.tracks) > 0
        )
    elif ltype == "alphabeticalByName":
        query = query.order_by(Folder.name)
    elif ltype == "alphabeticalByArtist":
        query = query.order_by(lambda f: f.parent.name + f.name)
    else:
        raise GenericError("Unknown search type")
    return request.formatter(
        "albumList",
        dict(
            album=[f.as_subsonic_child(request.user) for f in query.limit(size, offset)]
        ),
    )
def checkstoryvoting(story_ids):
    """Recalculate vote ratings for stories and persist changed ones.

    If *story_ids* is empty, scans all stories by ascending id in batches
    of 50; otherwise processes the given ids in batches of 50.  Each batch
    runs in its own db_session and is committed after search-index updates.
    """
    orm.sql_debug(False)
    if not current_app.story_voting:
        print('Story voting is disabled.')
        return
    if not story_ids:
        with orm.db_session:
            first_story = orm.select(orm.min(x.id) for x in Story).first()
            last_story = orm.select(orm.max(x.id) for x in Story).first()
        story_id = first_story
    else:
        story_ids_queue = story_ids[::-1]  # reversed so batches pop from the end
    while True:
        with orm.db_session:
            if not story_ids:
                # Sequential scan mode: next batch of up to 50 stories by id.
                stories = Story.select(lambda x: x.id >= story_id and x.id <= last_story).order_by(Story.id)[:50]
                if not stories:
                    break
            else:
                if not story_ids_queue:
                    break
                stories = list(Story.select(lambda x: x.id in story_ids_queue[-50:]).order_by(Story.id))
                story_ids_queue = story_ids_queue[:-50]
                if not stories:
                    continue
            changed_stories = []
            for story in stories:
                print('Story {}:'.format(story.id), end=' ', flush=True)
                old_count = story.vote_total  # TODO: rename to story.vote_count
                old_value = story.vote_value
                old_extra = story.vote_extra
                current_app.story_voting.update_rating(story)
                new_count = story.vote_total
                new_value = story.vote_value
                new_extra = story.vote_extra
                print('{} -> {}'.format(old_value, new_value), end='', flush=True)
                # vote_extra is JSON text; compare parsed values, not strings.
                if old_count != new_count or old_value != new_value or json.loads(old_extra) != json.loads(new_extra):
                    print(' (changed)')
                    changed_stories.append(story)
                else:
                    print('')
            print('Saving...', end=' ', flush=True)
            for story in changed_stories:
                story.bl.search_update(update_fields={'vote_total', 'vote_value'})
            orm.commit()
            print('Done.', flush=True)
            if not story_ids:
                # Advance the scan cursor past the last processed batch.
                story_id = stories[-1].id + 1
def create(cls, ip, user):
    """Create and persist a new login token bound to *ip* and *user*.

    The token is 64 hex characters: a dash-stripped uuid4 concatenated
    with the md5 digest of that uuid plus the creation timestamp.
    Returns the new entity instance.
    """
    # NOTE(review): max(id)+1 is racy under concurrent creation — two
    # sessions can compute the same id.  An autoincrement PK would be safer.
    # Renamed from `id` so the builtin is not shadowed.
    next_id = (orm.max(x.id for x in cls) or 0) + 1
    created = datetime.datetime.now()
    token = str(uuid.uuid4()).replace("-", "")
    token += hashlib.md5((token + str(created)).encode()).hexdigest()
    return cls(id=next_id, ip=ip, user=user, token=token, created=created)
def restore(self, author=None):
    """Un-delete this comment and repair the target's last_comment_id.

    :param author: the user attempting the restore; permission is checked
        via can_restore_by.
    :raises ValueError: if *author* is not allowed to restore.
    """
    if not self.can_restore_by(author):
        raise ValueError('Permission denied')
    self.model.deleted = False
    target = getattr(self.model, self.target_attr)
    if hasattr(target, 'last_comment_id'):
        # Recompute from the highest non-deleted comment id.
        target.last_comment_id = orm.select(orm.max(x.id) for x in target.comments if not x.deleted).first()
    # Cached front-page comment HTML is now stale.
    current_app.cache.delete('index_comments_html')
def restore(self, author=None):
    """Un-delete this comment and repair the target's last_comment_id.

    :param author: the user attempting the restore; permission is checked
        via can_restore_by.
    :raises ValueError: if *author* is not allowed to restore.
    """
    if not self.can_restore_by(author):
        raise ValueError('Permission denied')
    self.model.deleted = False
    target = getattr(self.model, self.target_attr)
    if hasattr(target, 'last_comment_id'):
        # Recompute from the highest non-deleted comment id.
        target.last_comment_id = orm.select(
            orm.max(x.id) for x in target.comments if not x.deleted).first()
    # Cached front-page comment HTML is now stale.
    current_app.cache.delete('index_comments_html')
def failures_by_exception(run_id=None):
    """Count failed NodeResolutions of a run, grouped by exception message.

    :param run_id: run to inspect; defaults to the most recent run
        (highest RunId id).
    :return: list of (exception_message, count) rows, most frequent first.
    """
    if not run_id:
        run_id = P.max(r.id for r in RunId)
    cur = db.get_connection().cursor()
    # Raw SQL with a bound parameter (never string-formatted into the query).
    return cur.execute(
        """
        select exception_message, count(*)
        from NodeResolution
        where success = 0 and run_id = ?
        group by exception_message
        order by count(*) desc
        """, [run_id]).fetchall()
def delete(self, author=None):
    """Soft-delete this comment, recording who deleted it and when.

    If this comment was the target's latest, last_comment_id is rewound to
    the newest remaining non-deleted comment (0 if none).

    :param author: the user attempting the delete; permission is checked
        via can_delete_by.
    :raises ValueError: if *author* is not allowed to delete.
    """
    if not self.can_delete_by(author):
        raise ValueError('Permission denied')
    self.model.deleted = True
    self.model.last_deleted_at = datetime.utcnow()
    # Anonymous/system deletions are stored as None.
    self.model.last_deleted_by = author if author and author.is_authenticated else None
    target = getattr(self.model, self.target_attr)
    if hasattr(target, 'last_comment_id') and target.last_comment_id == self.model.id:
        target.last_comment_id = orm.select(orm.max(x.id) for x in target.comments if not x.deleted).first() or 0
    # Cached front-page comment HTML is now stale.
    current_app.cache.delete('index_comments_html')
def album_list_id3():
    """Handle the Subsonic-style ``getAlbumList2`` endpoint (ID3/tag-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request and queries Album entities accordingly.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values["type"]
    size, offset = map(request.values.get, ["size", "offset"])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    query = Album.select()
    if ltype == "random":
        # Random selection returns immediately; offset is not applicable.
        return request.formatter(
            "albumList2",
            dict(album=[
                a.as_subsonic_album(request.user) for a in query.random(size)
            ]),
        )
    elif ltype == "newest":
        query = query.order_by(lambda a: desc(min(a.tracks.created)))
    elif ltype == "frequent":
        query = query.order_by(lambda a: desc(avg(a.tracks.play_count)))
    elif ltype == "recent":
        # Only albums with at least one played track.
        query = Album.select(
            lambda a: max(a.tracks.last_play) is not None).order_by(
                lambda a: desc(max(a.tracks.last_play)))
    elif ltype == "starred":
        query = select(s.starred for s in StarredAlbum
                       if s.user.id == request.user.id)
    elif ltype == "alphabeticalByName":
        query = query.order_by(Album.name)
    elif ltype == "alphabeticalByArtist":
        query = query.order_by(lambda a: a.artist.name + a.name)
    else:
        raise GenericError("Unknown search type")
    return request.formatter(
        "albumList2",
        dict(album=[
            f.as_subsonic_album(request.user)
            for f in query.limit(size, offset)
        ]),
    )
class ChapterFactory(PonyFactory):
    """factory_boy factory producing Chapter test fixtures."""

    class Meta(object):
        model = models.Chapter

    story = factory.SubFactory(StoryFactory)
    # Next free order number within the chapter's story (1-based).
    order = factory.LazyAttribute(
        lambda obj: (orm.select(orm.max(x.order) for x in models.Chapter if x.story == obj.story).first() or 0) + 1
    )
    text = factory.Sequence(lambda n: "This is text of chapter %d" % n)
    # Checksum kept consistent with the generated text.
    text_md5 = factory.LazyAttribute(lambda obj: md5(obj.text.encode('utf-8')).hexdigest())
    # Denormalized copies of the parent story's publication state.
    story_published = factory.LazyAttribute(lambda obj: obj.story.published)
    first_published_at = factory.LazyAttribute(lambda obj: None if obj.draft else obj.story.first_published_at)
    draft = False
def enter(sessionID):
    """Render the reading-entry page, or the sign-on page if not signed on.

    Initializes/refreshes the session, then builds the template context
    (session id, held-reading count, db file name, and — when signed on —
    the most recent reading date).
    """
    sessionID = Sessions.initializeSession(sessionID, request, response)
    respData = MultiDict(url=url, title='Blood Glucose')
    isSignedOn = Sessions.getSessionValueFromDB(sessionID, 'isSignedOn')
    respData.update(MultiDict(sessionID=sessionID, numberOfHeldReadings=numberOfPartials()))
    respData.update(MultiDict(dbFileName=dbFileName))
    if isSignedOn:
        with db_session:
            # Latest reading date shown on the entry form.
            maxReadingDate = max(r.date for r in Readings)
            respData.update(MultiDict(maxDate=maxReadingDate))
        return jinja2_template('EnterReading.jinja2', respData, template_lookup=['templates'])
    else:
        return jinja2_template('Signon.jinja2', respData, template_lookup=['templates'])
def album_list_id3():
    """Handle the Subsonic-style ``getAlbumList2`` endpoint (ID3/tag-based).

    Reads ``type``, ``size`` (default 10) and ``offset`` (default 0) from the
    request and queries Album entities accordingly.  Raises GenericError for
    an unknown ``type``.
    """
    ltype = request.values['type']
    size, offset = map(request.values.get, ['size', 'offset'])
    size = int(size) if size else 10
    offset = int(offset) if offset else 0
    query = Album.select()
    if ltype == 'random':
        # Random selection returns immediately; offset is not applicable.
        return request.formatter(
            'albumList2',
            dict(album=[
                a.as_subsonic_album(request.user) for a in query.random(size)
            ]))
    elif ltype == 'newest':
        query = query.order_by(lambda a: desc(min(a.tracks.created)))
    elif ltype == 'frequent':
        query = query.order_by(lambda a: desc(avg(a.tracks.play_count)))
    elif ltype == 'recent':
        # Only albums with at least one played track.
        query = Album.select(
            lambda a: max(a.tracks.last_play) is not None).order_by(
                lambda a: desc(max(a.tracks.last_play)))
    elif ltype == 'starred':
        query = select(s.starred for s in StarredAlbum
                       if s.user.id == request.user.id)
    elif ltype == 'alphabeticalByName':
        query = query.order_by(Album.name)
    elif ltype == 'alphabeticalByArtist':
        query = query.order_by(lambda a: a.artist.name + a.name)
    else:
        raise GenericError('Unknown search type')
    return request.formatter(
        'albumList2',
        dict(album=[
            f.as_subsonic_album(request.user)
            for f in query.limit(size, offset)
        ]))
def failures_by_traceback(run_id=None):
    """Count failed NodeResolutions of a run, grouped by traceback.

    :param run_id: run to inspect; defaults to the most recent run
        (highest RunId id).
    :return: list of (traceback, exception_message, count) rows, most
        frequent first.
    """
    if not run_id:
        # Fixed: select the run's id, not the RunId entity itself, matching
        # the sibling helpers (failures_by_exception, stats).
        run_id = P.max(r.id for r in RunId)
    # Removed a leftover debug `return P.select(...)` that short-circuited
    # the function (its filter `run_id == run_id` was a tautology and it
    # made the grouped SQL below unreachable).
    cur = db.get_connection().cursor()
    # Raw SQL with a bound parameter (never string-formatted into the query).
    return cur.execute(
        """
        select traceback, exception_message, count(*)
        from NodeResolution
        where success = 0 and run_id = ?
        group by traceback
        order by count(*) desc
        """, [run_id]).fetchall()
def current(strVal, Database, strValUnits):
    """Return [sentence, value] for the most recent row of *Database*.

    :param strVal: human-readable name of the measurement for the sentence.
    :param Database: Pony entity class with time_stamp and value attributes.
    :param strValUnits: unit string appended to the sentence.
    """
    with orm.db_session:
        # fetch most current datetime for val
        latest = orm.max(v.time_stamp for v in Database)
        # Some rows store the timestamp as text; normalize to datetime.
        if not isinstance(latest, datetime.datetime):
            latest = dparser.parse(latest)
        val = orm.select(v for v in Database if v.time_stamp == latest)
        # should only obtain single element with latest timestamp;
        # the loop rebinds val from the query to that row's int value
        for v in val:
            val = int(v.value)
        return [
            "The " + strVal + " is currently " + str(val) + " " + strValUnits + ".",
            val
        ]
def sample_songs_weighted(num: int = 6) -> List[Song]:
    """
    Samples a selection of songs from the Songs table, weighted by playcount.
    This means songs that have been played less have a higher chance of being
    put in the queue.

    :param int num: number of songs to sample
    :return: list of songs sampled from Songs table, weighted by playcount
    """
    songs = Song.select()[:]
    if len(songs) < num:
        # Fewer songs than requested: return everything we have.
        return songs
    # Weight each song by (max playcount + 1) - playcount, computed from the
    # rows already fetched instead of a second aggregate query.  The +1 makes
    # every weight strictly positive, so the original abs() was redundant.
    max_plays = max(song.playcount for song in songs) + 1
    weights = [max_plays - song.playcount for song in songs]
    return choices(songs, weights=weights, k=num)
def delete(self, author=None):
    """Soft-delete this comment, recording who deleted it and when.

    If this comment was the target's latest, last_comment_id is rewound to
    the newest remaining non-deleted comment (0 if none).

    :param author: the user attempting the delete; permission is checked
        via can_delete_by.
    :raises ValueError: if *author* is not allowed to delete.
    """
    if not self.can_delete_by(author):
        raise ValueError('Permission denied')
    self.model.deleted = True
    self.model.last_deleted_at = datetime.utcnow()
    # Anonymous/system deletions are stored as None.
    self.model.last_deleted_by = author if author and author.is_authenticated else None
    target = getattr(self.model, self.target_attr)
    if hasattr(
            target,
            'last_comment_id') and target.last_comment_id == self.model.id:
        target.last_comment_id = orm.select(
            orm.max(x.id) for x in target.comments if not x.deleted).first() or 0
    # Cached front-page comment HTML is now stale.
    current_app.cache.delete('index_comments_html')
def download_data(bounds):
    """Brute-force download of roll-number results, resuming from the DB.

    :param bounds: (from_num, to_num) slice of the generated roll-number
        list to process.  If the last stored Record id equals to_num there
        is nothing new and the function returns early; otherwise it resumes
        one past the last stored record's index.
    """
    (from_num, to_num) = bounds
    rn1 = [str(x).zfill(2) for x in range(0, 100)]
    rn2 = [str(x).zfill(3) for x in range(0, 1000)]
    rn3 = [str(x).zfill(3) for x in range(0, 1000)]
    logger.info("Generating the list of Roll Numbers")
    # Full cartesian product of the three roll-number segments.
    RNLIST = [RollNo(a, b, c, "") for a in rn1 for b in rn2 for c in rn3]
    URL = "http://pec.edu.pk"
    INVALID_RESULT = "No Result found"
    logger.info("Start Parameter is: {}".format(from_num))
    logger.info("End Parameter is: {}".format(to_num))
    logger.info("Saving data to database {}".format(DBNAME))
    try:
        with orm.db_session:
            last_record = orm.max(r.id for r in Record)
            if last_record == to_num:
                logger.info("No new data to download. Exiting...")
                return
            if last_record:
                # Resume just after the last persisted roll number.
                last_idx = orm.select(r.idx for r in Record if r.id == last_record)[:][0]
                start = last_idx + 1
            else:
                start = from_num
        logger.info(
            "Starting the Brute Force Search from position {}".format(start))
        logger.info("Process started at {}".format(time.strftime('%c')))
        for idx, rn in enumerate(RNLIST[start:to_num]):
            # Throttle: pause every `wait` requests to be polite to the server.
            if idx % wait == 0:
                time.sleep(5)
            if idx % 10 == 0:
                logger.info("Downloading data for Roll No. {}".format("-".join(
                    rn[:3])))
            logger.info("Downloading Roll No. {}".format("-".join(rn[:3])))
            visit(URL, rn, invalid=INVALID_RESULT, idx=RNLIST.index(rn))
        logger.info("Process ended at {}".format(time.strftime('%c')))
    except KeyboardInterrupt as e:
        logger.error(str(e))
        logger.error("Recieved Keyboard Interrupt. Exiting.")
def download_data(start_num, end_num, level, part, type):
    """Brute-force download of roll-number results, resuming from the DB.

    :param start_num: index to start from when no records exist yet.
    :param end_num: index to stop at (also the "all done" sentinel: if the
        last stored Record id equals it, nothing new is downloaded).
    :param level: forwarded to output_ranges to pick the number range/url.
    :param part: forwarded to output_ranges.
    :param type: forwarded to output_ranges (name kept for callers despite
        shadowing the builtin).
    """
    from_num, to_num, url = output_ranges(level, part, type)
    logger.info("Generating the list of Roll Numbers")
    nums = list(range(from_num, to_num + 1))
    rnlist = [RollNo(str(n), idx, "") for idx, n in enumerate(nums)]
    logger.info("Start Parameter is: {}".format(start_num))
    logger.info("End Parameter is: {}".format(end_num))
    logger.info("Saving data to database {}".format(DBNAME))
    try:
        with orm.db_session:
            # noinspection PyTypeChecker
            last_record = orm.max(r.id for r in Record)
            if last_record == end_num:
                logger.info("No new data to download. Exiting...")
                return
            if last_record:
                # Resume just after the last persisted roll number.
                # noinspection PyTypeChecker
                last_idx = orm.select(r.idx for r in Record
                                      if r.id == last_record)[:][0]
                start = last_idx + 1
            else:
                start = start_num
        logger.info(
            "Starting the Brute Force Search from position {}".format(start))
        logger.info("Process started at {}".format(time.strftime('%c')))
        for idx, rn in enumerate(rnlist[start:end_num]):
            # Throttle: pause every WAIT requests to be polite to the server.
            if idx % WAIT == 0:
                time.sleep(5)
            if idx % 25 == 0:
                logger.info("Downloading data for Roll No. {}".format(
                    rn.roll_no))
            visit(url, rn, idx=rn.idx)
        logger.info("Process ended at {}".format(time.strftime('%c')))
    except KeyboardInterrupt as err:
        logger.exception("{0}".format(err))
        logger.exception("Received Keyboard Interrupt. Exiting.")
    except Exception:
        # Fixed: the original passed sys.exc_info()[0] to a format string
        # with no placeholder, which itself raised a logging error; also
        # narrowed the bare `except:` (it is re-raised regardless).
        logger.exception("Unexpected error: %s", sys.exc_info()[0])
        raise
def update_last_feed():
    """Build a status message about the most recently finished breastfeed.

    Returns a two-line Greek string naming the side of the last feed
    ('--' when no feed has ended yet) and the hours/minutes elapsed
    since it ended.
    """
    current_time = dtt.datetime.now()
    last_ended_feed = orm.max(d.end_time for d in Breastfeeding)
    is_left = orm.select(d.is_left for d in Breastfeeding if d.end_time == last_ended_feed).first()
    if last_ended_feed is None:
        # Nothing recorded yet: elapsed time collapses to zero, side unknown.
        last_ended_feed = current_time
        side_str = '--'
    elif is_left:
        side_str = 'ΑΡΙΣΤΕΡΟ'
    else:
        side_str = 'ΔΕΞΙ'
    elapsed = (current_time - last_ended_feed).total_seconds()
    hours, remainder = divmod(elapsed, 3600)
    minutes = remainder // 60
    return ('Έφαγε από το ' + side_str + ' στήθος\n'
            + f'πριν {hours:02.0f} ώρες {minutes:02.0f} λεπτά')
def rain_text():
    """Return a one-element list describing total rainfall for the most
    recent TotalRainfall row (value in inches over that row's day span)."""
    with orm.db_session:
        # fetch most current datetime for val and days
        latest = orm.max(v.time_stamp for v in TotalRainfall)
        # Some rows store the timestamp as text; normalize to datetime.
        if not isinstance(latest, datetime.datetime):
            latest = dparser.parse(latest)
        val = orm.select(v for v in TotalRainfall if v.time_stamp == latest)
        # should only obtain single element with latest timestamp
        for v in val:
            days = int(v.days)
            value = v.value
        listDescription = [
            "The total rainfall has been " + str(round(value, 2)) +
            " inches over the past " + str(days) + " days."
        ]
        return listDescription
def checkstorycomments(story_ids):
    """Run comment consistency checks for stories.

    If *story_ids* is empty, walks every Story by ascending id; otherwise
    processes the listed ids.  Each story is handled in its own db_session.
    Also repairs the denormalized story_published flag on each comment.
    """
    orm.sql_debug(False)
    if not story_ids:
        with orm.db_session:
            first_story = orm.select(orm.min(x.id) for x in Story).first()
            last_story = orm.select(orm.max(x.id) for x in Story).first()
        story_id = first_story
    else:
        i = 0
    while True:
        with orm.db_session:
            if not story_ids:
                # Sequential scan mode: fetch the next existing story by id.
                story = Story.select(
                    lambda x: x.id >= story_id and x.id <= last_story).first()
                if not story:
                    break
            else:
                if i >= len(story_ids):
                    break
                story = Story.get(id=story_ids[i])
                i += 1
                if not story:
                    print('Story {} not found'.format(story_ids[i - 1]))
                    continue
            print('Story {}'.format(story.id))
            comments_list = story.bl.select_comments().order_by('c.date, c.id')
            # Repair the denormalized story_published flag on comments
            pub = story.published
            for c in comments_list:
                if c.story_published != pub:
                    print(' -{}: pub {} -> {}'.format(c.id, c.story_published, pub))
                    c.story_published = pub
                    c.flush()
            # All remaining checks happen here
            check_comments_for(story, comments_list)
            # Advance the scan cursor past the story just processed.
            story_id = story.id + 1
def checkstorylocalcomments(story_ids):
    """Run comment consistency checks for story local (private) threads.

    If *story_ids* is empty, walks every StoryLocalThread by ascending id;
    otherwise resolves each listed story's local thread.  Each thread is
    handled in its own db_session.
    """
    orm.sql_debug(False)
    if not story_ids:
        with orm.db_session:
            first_local = orm.select(orm.min(x.id) for x in StoryLocalThread).first()
            last_local = orm.select(orm.max(x.id) for x in StoryLocalThread).first()
        local_id = first_local
    else:
        i = 0
    while True:
        with orm.db_session:
            if not story_ids:
                # Sequential scan mode: fetch the next existing thread by id.
                local = StoryLocalThread.select(
                    lambda x: x.id >= local_id and x.id <= last_local).first()
                if not local:
                    break
            else:
                if i >= len(story_ids):
                    break
                story = Story.get(id=story_ids[i])
                i += 1
                if not story:
                    print('Story {} not found'.format(story_ids[i - 1]))
                    continue
                if not story.local:
                    print('Story {} has no StoryLocalThread'.format(
                        story_ids[i - 1]))
                    continue
                local = story.local
            print('Story {} / StoryLocalThread {}'.format(
                local.story.id, local.id))
            comments_list = local.bl.select_comments().order_by('c.date, c.id')
            # All remaining checks happen here
            check_comments_for(local, comments_list)
            # Advance the scan cursor past the thread just processed.
            local_id = local.id + 1
def checkstorycomments(story_ids):
    """Run comment consistency checks for stories.

    If *story_ids* is empty, walks every Story by ascending id; otherwise
    processes the listed ids.  Each story is handled in its own db_session.
    Also repairs the denormalized story_published flag on each comment.
    """
    orm.sql_debug(False)
    if not story_ids:
        with orm.db_session:
            first_story = orm.select(orm.min(x.id) for x in Story).first()
            last_story = orm.select(orm.max(x.id) for x in Story).first()
        story_id = first_story
    else:
        i = 0
    while True:
        with orm.db_session:
            if not story_ids:
                # Sequential scan mode: fetch the next existing story by id.
                story = Story.select(lambda x: x.id >= story_id and x.id <= last_story).first()
                if not story:
                    break
            else:
                if i >= len(story_ids):
                    break
                story = Story.get(id=story_ids[i])
                i += 1
                if not story:
                    print('Story {} not found'.format(story_ids[i - 1]))
                    continue
            print('Story {}'.format(story.id))
            comments_list = story.bl.select_comments().order_by('c.date, c.id')
            # Repair the denormalized story_published flag on comments
            pub = story.published
            for c in comments_list:
                if c.story_published != pub:
                    print(' -{}: pub {} -> {}'.format(c.id, c.story_published, pub))
                    c.story_published = pub
                    c.flush()
            # All remaining checks happen here
            check_comments_for(story, comments_list)
            # Advance the scan cursor past the story just processed.
            story_id = story.id + 1
def enterPost():
    """Handle the reading-entry form POST.

    Validates the AM (required) and PM (optional) values as floats,
    re-rendering the form with error flags on failure; inserts a new
    Readings row, flagging a duplicate date as already entered; on
    success records the entry timestamp and renders the Admin page.
    """
    rf = request.forms
    formDate = rf.date
    formAM = rf.am
    # Empty form fields are normalized to None.
    formAM = None if formAM == '' else formAM
    formPM = rf.pm
    formPM = None if formPM == '' else formPM
    formNote = rf.comment
    sessionID = HTTPCookie.getSessionCookie(request)
    respData = MultiDict(url=url, title='Blood Glucose', sessionID=sessionID)
    respData.update(MultiDict(numberOfHeldReadings=numberOfPartials()))
    respData.update(MultiDict(editDate=formDate))
    with db_session:
        maxReadingDate = max(r.date for r in Readings)
        respData.update(MultiDict(maxDate=maxReadingDate))
    valueError = False
    if not isFloat(formAM):
        valueError = True
        respData.update(MultiDict(AMisNotOK=True, errorValue=formAM))
    if formPM is not None:
        if not isFloat(formPM):
            valueError = True
            respData.update(MultiDict(PMisNotOK=True, errorValue=formPM))
    if valueError:
        return jinja2_template('EnterReading.jinja2', respData, template_lookup=['templates'])
    alreadyEntered = False
    try:
        with db_session:
            Readings(date=formDate, am=formAM, pm=formPM, comment=formNote)
    # NOTE(review): (IntegrityError, Exception) catches *every* exception,
    # not just duplicate-date IntegrityErrors, so any insert failure is
    # reported as "already entered" — confirm whether that is intended.
    except (IntegrityError, Exception):
        alreadyEntered = True
    if alreadyEntered:
        respData.update(MultiDict(alreadyEntered=True))
        return jinja2_template('EnterReading.jinja2', respData, template_lookup=['templates'])
    respData.update(MultiDict(numberOfHeldReadings=numberOfPartials()))
    System.putLastReadingDateStr(dateTimeStr(datetime.now(), 'America/Vancouver', ampm=True, month=True))
    return jinja2_template('Admin.jinja2', respData, template_lookup=['templates'])
def stats(project=None, run_id=None):
    """Summarize NodeResolution success/failure counts for a run.

    :param project: optional project to restrict the counts to.
    :param run_id: run to inspect; defaults to the most recent run
        (highest RunId id).
    :return: dict with nb_failures, nb_successes and their percentages
        (all zero when the run/project has no resolutions, instead of the
        original ZeroDivisionError).
    """
    if not run_id:
        run_id = P.max(r.id for r in RunId)
    fail = P.select(n for n in NodeResolution
                    if not n.success and n.run_id.id == run_id)
    success = P.select(n for n in NodeResolution
                       if n.success and n.run_id.id == run_id)
    if project:
        fail = fail.filter(lambda n: n.node.file.project == project)
        success = success.filter(lambda n: n.node.file.project == project)
    # Two plain assignments replace the original accidental trailing-comma
    # tuple unpack.
    nb_failures = P.count(fail)
    nb_successes = P.count(success)
    nb_total = nb_failures + nb_successes
    if nb_total == 0:
        # Guard against division by zero for empty runs/projects.
        return {
            'nb_failures': 0,
            'nb_successes': 0,
            'failures_pct': 0.0,
            'successes_pct': 0.0
        }
    return {
        'nb_failures': nb_failures,
        'nb_successes': nb_successes,
        'failures_pct': nb_failures / float(nb_total) * 100,
        'successes_pct': nb_successes / float(nb_total) * 100
    }
def download_data(from_num, to_num):
    """Brute-force download of roll-number results, resuming from the DB.

    Builds the full cartesian product of roll-number segments, figures out
    where the last run stopped (by locating the last stored Record's roll
    number in that list), then visits each remaining roll number up to
    *to_num*.
    """
    rn1 = [str(x).zfill(2) for x in range(0, 100)]
    rn2 = [str(x).zfill(3) for x in range(0, 100)]
    rn3 = [str(x).zfill(3) for x in range(0, 100)]
    rnlist = [RollNo(a, b, c, "") for a in rn1 for b in rn2 for c in rn3]
    URL = "http://pec.edu.pk"
    INVALID_RESULT = "No Result found"
    print("Start Parameter is: {}".format(options.start))
    print("End Parameter is: {}".format(options.end))
    print("Saving data to database {}".format(DBNAME))
    with orm.db_session:
        last_record = orm.max(r.id for r in Record)
        if last_record == to_num:
            print("No new data to download. Exiting...")
            return
        if last_record:
            # Rebuild the RollNo of the last stored record and resume
            # one position after it in the generated list.
            last_roll_num = RollNo(*list(
                orm.select(
                    (r.rollno1, r.rollno2, r.rollno3)
                    for r in Record if r.id == last_record)[:][0]) + [""])
            start = rnlist.index(last_roll_num) + 1
        else:
            start = from_num
    print("Starting the Brute Force Search from position {}".format(start))
    print("Process started at {}".format(time.strftime('%c')))
    for idx, rn in enumerate(rnlist[start:to_num]):
        # Progress line every 25 requests.
        if idx % 25 == 0:
            print("{} Downloading data for Roll No. {}".format(
                time.strftime("%c"), "-".join(rn[:3])))
        visit(URL, rn, invalid=INVALID_RESULT, idx=rnlist.index(rn))
    print("Process ended at {}".format(time.strftime('%c')))
def checknewscomments(news):
    """Run comment consistency checks for news items.

    If *news* is empty, walks every NewsItem by ascending id; otherwise
    each entry in *news* is looked up by numeric id or by name.  Each item
    is processed in its own db_session.
    """
    orm.sql_debug(False)
    if not news:
        with orm.db_session:
            first_newsitem = orm.select(orm.min(x.id) for x in NewsItem).first()
            last_newsitem = orm.select(orm.max(x.id) for x in NewsItem).first()
        newsitem_id = first_newsitem
    else:
        i = 0
    while True:
        with orm.db_session:
            if not news:
                # Sequential scan mode: fetch the next existing item by id.
                newsitem = NewsItem.select(
                    lambda x: x.id >= newsitem_id and x.id <= last_newsitem).first()
                if not newsitem:
                    break
            else:
                if i >= len(news):
                    break
                if news[i].isdigit():
                    newsitem = NewsItem.get(id=int(news[i]))
                else:
                    newsitem = NewsItem.get(name=news[i])
                i += 1
                if not newsitem:
                    print('News item {} not found'.format(news[i - 1]))
                    continue
            print('News item {} ({})'.format(newsitem.id, newsitem.name))
            comments_list = newsitem.bl.select_comments().order_by(
                'c.date, c.id')
            check_comments_for(newsitem, comments_list)
            # Advance the scan cursor past the item just processed.
            newsitem_id = newsitem.id + 1
def checkstorylocalcomments(story_ids):
    """Run comment consistency checks for story local (private) threads.

    If *story_ids* is empty, walks every StoryLocalThread by ascending id;
    otherwise resolves each listed story's local thread.  Each thread is
    handled in its own db_session.
    """
    orm.sql_debug(False)
    if not story_ids:
        with orm.db_session:
            first_local = orm.select(orm.min(x.id) for x in StoryLocalThread).first()
            last_local = orm.select(orm.max(x.id) for x in StoryLocalThread).first()
        local_id = first_local
    else:
        i = 0
    while True:
        with orm.db_session:
            if not story_ids:
                # Sequential scan mode: fetch the next existing thread by id.
                local = StoryLocalThread.select(lambda x: x.id >= local_id and x.id <= last_local).first()
                if not local:
                    break
            else:
                if i >= len(story_ids):
                    break
                story = Story.get(id=story_ids[i])
                i += 1
                if not story:
                    print('Story {} not found'.format(story_ids[i - 1]))
                    continue
                if not story.local:
                    print('Story {} has no StoryLocalThread'.format(story_ids[i - 1]))
                    continue
                local = story.local
            print('Story {} / StoryLocalThread {}'.format(local.story.id, local.id))
            comments_list = local.bl.select_comments().order_by('c.date, c.id')
            # All remaining checks happen here
            check_comments_for(local, comments_list)
            # Advance the scan cursor past the thread just processed.
            local_id = local.id + 1
def get_vals(Database, days):
    """Collect plot data for the last *days* days of *Database* rows.

    :param Database: Pony entity class with time_stamp and value attributes.
    :param days: size of the window, counted back from the latest row.
    :return: (x, y) where x is POSIX timestamps and y the corresponding values.
    """
    days_delta = datetime.timedelta(days=days)
    with orm.db_session:
        # Get the datetime of the latest value in the database
        latest = orm.max(v.time_stamp for v in Database)
        # Some rows store the timestamp as text; normalize to datetime.
        if not isinstance(latest, datetime.datetime):
            latest = dparser.parse(latest)
        datetime_cutoff = latest - days_delta
        values = orm.select(v for v in Database if v.time_stamp > datetime_cutoff)
        # x axis values to plot (datetime)
        x = []
        # y axis values to plot (value)
        y = []
        for value in values:
            cur_time_stamp = value.time_stamp
            # Same text-vs-datetime normalization as for `latest` above.
            if not isinstance(cur_time_stamp, datetime.datetime):
                cur_time_stamp = dparser.parse(cur_time_stamp)
            x.append(cur_time_stamp.timestamp())
            y.append(value.value)
    return x, y
def get(limit, padding, orderby: list, search=None, lastInterpretation=None,
        interpretationNumber=None, score=None, showlights=None, vocals=None,
        odlc=None, arrangements=None):
    """Search songs with optional filters and return serialized results.

    :param limit: max number of rows returned.
    :param padding: offset into the result set.
    :param orderby: sort keys for format_order_by; mutated in place when a
        fuzzy search prepends '-similarity'.
    :param search: fuzzy-search text matched via the pg_trgm similarity()
        raw-SQL expression.
    :param lastInterpretation: [min, max] day range since the last play;
        [0, 100] means "no filter", and a max >= 100 disables the upper bound.
    :param interpretationNumber: [min, max] play-count range; same sentinels.
    :param score: [min, max] best-score range; [0, 100] means "no filter".
    :param showlights / vocals / odlc / arrangements: attribute filters.
    :return: ({'data': [...serialized songs...]}, 200)
    """
    search_results = orm.select(s for s in Song)
    # fuzzy search
    if search:
        search_results = search_results.where(
            orm.raw_sql('similarity("s"."fts_col", $search) > .1'))
        # add similarity to the order by array
        orderby.insert(0, '-similarity')
    # does the song has showlights
    if showlights:
        search_results = search_results.where(
            lambda s: s.showlights == showlights)
    # does the song display lyrics
    if vocals:
        search_results = search_results.where(lambda s: s.vocals == vocals)
    # is the song a odlc or a cdlc
    if odlc is not None:
        search_results = search_results.where(lambda s: s.official == odlc)
    # --- arrangement specific fields ---
    # does the song have certain arrangements
    if arrangements:
        # Build an or-joined Pony filter string, then strip the leading 'or '.
        filter_function = ''
        for i in range(0, len(arrangements)):
            filter_function += 'or arrangements[{}] in s.arrangements.type '.format(
                i)
        filter_function = filter_function.split('or ', 1)[1]
        search_results = search_results.where(filter_function)
    # --- interpretation specific fields ---
    # how many times does the song was played
    if interpretationNumber != [0, 100]:
        lower_bound = min(interpretationNumber[0], interpretationNumber[1])
        upper_bound = max(interpretationNumber[0], interpretationNumber[1])
        search_results = search_results.where(
            lambda s: lower_bound <= orm.count(s.interpretations) and
            (orm.count(s.interpretations) <= upper_bound or upper_bound >= 100
             ))
    if lastInterpretation != [0, 100]:
        # higher bound in days to allow no maximum calculation when >=100
        upper_bound = max(lastInterpretation[0], lastInterpretation[1])
        # datetime bounds to be used in where clause
        older_bound = datetime.now() - timedelta(
            days=max(lastInterpretation[0], lastInterpretation[1]))
        youger_bound = datetime.now() - timedelta(
            days=min(lastInterpretation[0], lastInterpretation[1]))
        search_results = search_results.where(lambda s: youger_bound > orm.max(
            s.interpretations.date) and (orm.max(s.interpretations.date) <=
                                         older_bound or upper_bound >= 100))
    if score != [0, 100]:
        lower_bound = Decimal(min(score[0], score[1]))
        upper_bound = Decimal(max(score[0], score[1]))
        search_results = search_results.where(
            lambda s: lower_bound <= orm.max(s.interpretations.score) and orm.
            max(s.interpretations.score) <= upper_bound)
    # apply order by, limit and padding
    search_results = search_results \
        .order_by(format_order_by(orderby)) \
        .limit(limit=limit, offset=padding)
    return {'data': [s.serialize() for s in search_results]}, 200
def suggest_song(search, lastInterpretation=None, interpretationNumber=None,
                 score=None, showlights=None, vocals=None, odlc=None,
                 arrangements=None):
    """Return up to five suggestion rows for a fuzzy search term.

    Each row is a distinct (name, album, artist, similarity) tuple, best
    similarity first; the optional filters mirror get().

    Args:
        search: fuzzy-search term (trigram similarity on fts_col,
            resolved through the $search placeholder).
        lastInterpretation: [lo, hi] window in days since the last play;
            [0, 100] disables the filter and hi >= 100 means "no maximum".
        interpretationNumber: [lo, hi] play-count window; same conventions.
        score: [lo, hi] best-score window; [0, 100] disables the filter.
        showlights: when truthy, keep only songs that have showlights.
        vocals: when truthy, keep only songs that display lyrics.
        odlc: when not None, True keeps official DLC, False keeps customs.
        arrangements: arrangement types; at least one must be present.

    Returns:
        A Pony query limited to 5 rows.
    """
    # Fuzzy search on name/album/artist, keeping the similarity for ordering.
    song_results = orm \
        .select((s.name, s.album, s.artist,
                 orm.raw_sql('similarity("s"."fts_col", $search)'))
                for s in Song) \
        .distinct() \
        .where(format_similarity('fts_col', search))

    # --- song flags ---
    if showlights:
        song_results = song_results.where(lambda s: s.showlights == showlights)
    if vocals:
        song_results = song_results.where(lambda s: s.vocals == vocals)
    if odlc is not None:
        song_results = song_results.where(lambda s: s.official == odlc)

    # --- arrangement specific fields ---
    # Keep songs that have at least one of the requested arrangement types;
    # Pony accepts the condition as a text expression, `arrangements` is
    # resolved from this scope.
    if arrangements:
        arrangement_filter = ' or '.join(
            'arrangements[{}] in s.arrangements.type'.format(i)
            for i in range(len(arrangements)))
        song_results = song_results.where(arrangement_filter)

    # --- interpretation specific fields ---
    # Play-count window; an upper bound >= 100 means "no maximum".
    if interpretationNumber != [0, 100]:
        lower_bound = min(interpretationNumber[0], interpretationNumber[1])
        upper_bound = max(interpretationNumber[0], interpretationNumber[1])
        song_results = song_results.where(
            lambda s: lower_bound <= orm.count(s.interpretations)
            and (orm.count(s.interpretations) <= upper_bound
                 or upper_bound >= 100))

    # Window on the date of the most recent play.
    if lastInterpretation != [0, 100]:
        # Upper bound in days; >= 100 disables the "oldest" constraint.
        upper_bound = max(lastInterpretation[0], lastInterpretation[1])
        # older_bound is the earliest acceptable last-play datetime,
        # younger_bound the latest.
        older_bound = datetime.now() - timedelta(
            days=max(lastInterpretation[0], lastInterpretation[1]))
        younger_bound = datetime.now() - timedelta(
            days=min(lastInterpretation[0], lastInterpretation[1]))
        # BUGFIX: the original tested max(date) <= older_bound, which kept
        # only songs played *more* than upper_bound days ago; the intended
        # window is older_bound <= last_play < younger_bound.
        song_results = song_results.where(
            lambda s: younger_bound > orm.max(s.interpretations.date)
            and (orm.max(s.interpretations.date) >= older_bound
                 or upper_bound >= 100))

    # Window on the best score reached.
    if score != [0, 100]:
        lower_bound = Decimal(min(score[0], score[1]))
        upper_bound = Decimal(max(score[0], score[1]))
        song_results = song_results.where(
            lambda s: lower_bound <= orm.max(s.interpretations.score)
            and orm.max(s.interpretations.score) <= upper_bound)

    # Best matches first, then a stable alphabetical tie-break.
    return song_results \
        .order_by(format_order_by(['-similarity', 'name', 'album', 'artist'])) \
        .limit(limit=5)
def maps():
    """Flask view: flight-price choropleth maps per departure airport.

    GET without ``did`` renders the search form; GET with ``did`` renders
    three maps (cost, duration, easiness) for that departure airport.
    A valid POST harvests fresh Direction rows through the RapidAPI flight
    endpoint, writes an 'AIR' Log row and redirects back to the GET view.
    A new harvest is throttled to one per 24 hours, based on the newest
    'AIR' Log entry.
    """
    form = AirportsForm(request.form)
    departure = None
    rate = 1
    mapcost = ''
    mapduration = ''
    mapeasiness = ''
    msg = ''
    did = ''
    with orm.db_session:
        logs = Log.select(lambda p: p.country == 'AIR').order_by(
            orm.desc(Log.id))[:]
        previous = orm.select(c.departure for c in Direction).prefetch(
            Airport)[:]
        # Throttle: a new search is allowed only 24h after the latest run.
        if logs[0].dt + timedelta(days=1) > datetime.now():
            delta = (logs[0].dt + timedelta(days=1) - datetime.now()).seconds
            hours = (delta // 3600)
            minutes = (delta - hours * 3600) // 60
            msg = 'New search will be available in %s hour(s) %s minutes.' % (
                hours, minutes)
        else:
            airports = Airport.select().order_by(Airport.country)[:]
            form.airports.choices = [
                (x.id, '%s - %s, %s' % (x.country, x.code, x.city))
                for x in airports
            ]
    if request.method == 'GET':
        did = request.values.get('did', '')
        currency = 'USD'
        if did != '':
            cr = CurrencyRates()
            with orm.db_session:
                departure = Airport.get(id=int(did))
                # Cheapest direction per destination country for this
                # departure.
                # NOTE(review): departure.id is an int from our own DB, so
                # this is not attacker-controlled, but select_by_sql
                # parameter binding would still be safer than %-formatting.
                directions = Direction.select_by_sql(
                    'select d0.* from Directions d0 join Airports a0 on a0.id=d0.destination where d0.cost=(select min(d.cost) from Directions d join Airports a on a.id=d.destination where a.country=a0.country and d.departure=d0.departure) and departure=%s'
                    % departure.id)
                try:
                    rate = cr.get_rate(directions[0].currency, 'USD')
                except RatesNotAvailableError:
                    if directions[0].currency == 'KZT':
                        # Fallback rate when the service has no KZT quote.
                        rate = 0.0032
                    else:
                        # No conversion available: show native currency.
                        rate = 1
                        currency = directions[0].currency
                locations = [d.destination.iso3 for d in directions]
                texts = [
                    '%s - %s, %s' % (d.destination.country,
                                     d.destination.code, d.destination.city)
                    for d in directions
                ]
                vals = [int(d.cost * rate) for d in directions]
                costperm = [
                    int(d.cost * rate * d.duration / float(d.distance))
                    for d in directions
                ]
                times = [d.duration // 60 for d in directions]
                mapcost = makemap(
                    'Flight cost from %s to some countries.' % departure.code,
                    'Cost, %s' % currency, locations, vals, texts)
                mapduration = makemap(
                    'Total flight duration, best from 3 most cheap flight.',
                    'Duration, hours', locations, times, texts)
                mapeasiness = makemap('Easiness (cost * duration / distance).',
                                      'Easiness, $*h/mi', locations, costperm,
                                      texts)
    elif form.validate_on_submit() and msg == '':
        with orm.db_session:
            dtstr = (datetime.now() + timedelta(days=30)).strftime('%Y-%m-%d')
            departure = Airport.get(id=int(request.form.get('airports')))
            # Rotate through iterations 1..3 for this departure.
            iteration = orm.max(p.iteration for p in Direction
                                if p.departure == departure)
            if iteration is None:  # was `== None`; identity test for None
                iteration = 0
            if iteration < 3:
                iteration += 1
            else:
                iteration = 1
            # Drop stale rows for this departure/iteration before refetching.
            Direction.select(lambda p: p.departure == departure and p.
                             iteration == iteration).delete(bulk=True)
            portstodo = Airport.select(lambda p: p.iteration == iteration and
                                       p.code != departure.code)[:]
            # SECURITY(review): hard-coded RapidAPI credentials — move them
            # to configuration/environment instead of source control.
            rapid = RapidConnect("flight-167611",
                                 "12489be9-a1b2-442f-9f98-77609a7d6a9d")
            for x in portstodo:
                processdestination(rapid, departure, x, dtstr, iteration)
            # Record the run so the 24h throttle above applies.
            Log(keyword='none', dt=datetime.now(), country='AIR')
        return redirect('%s?did=%s' % (url_for('maps'), departure.id))
    return render_template('parsers/maps.jade',
                           form=form,
                           msg=msg,
                           departure=departure,
                           previous=previous,
                           mapcost=mapcost,
                           mapduration=mapduration,
                           mapeasiness=mapeasiness)
def __init__(self, **kwargs):
    """Create the entity, emulating auto-increment for ``id``.

    When no explicit ``id`` is supplied, allocate max(existing ids) + 1,
    i.e. 1 for an empty table.
    """
    if "id" not in kwargs:
        highest = orm.max(x.id for x in User) or 0
        kwargs["id"] = highest + 1
    super().__init__(**kwargs)
def __init__(self, **kwargs):
    """Create a Build, emulating auto-increment for ``id``.

    Backwards compatibility for when SQLAlchemy was used: Auto Increment
    was not enabled there, so allocate max(existing ids) + 1 ourselves
    (1 for an empty table) when no explicit ``id`` is supplied.
    """
    if "id" not in kwargs:
        kwargs["id"] = (orm.max(x.id for x in Build) or 0) + 1
    # Zero-argument super(), consistent with the other entities in this file.
    super().__init__(**kwargs)