def addComment(sub, post):
    if sub in ('food', 'pizza', 'sushi'):
        getquote = session2.query(FoodStuff).order_by(func.rand()).first()
        print("sub", sub)
        try:
            post.reply(str(getquote.comment))
        except Exception:
            pass
        print(getquote.comment)
        print("added comment ..")
    elif sub == 'funny':
        getquote = session2.query(FunnyStuff).order_by(func.rand()).first()
        print(getquote.id)
        try:
            post.reply(str(getquote.comment))
        except Exception:
            pass
        print(getquote.comment)
        print("added comment ..")
    else:
        print("failure")
def addcomment(sub, post):
    if sub in ('food', 'pizza', 'sushi'):
        getquote = session.query(FoodStuff).order_by(func.rand()).first()
        print("sub", sub)
        try:
            post.reply(str(getquote.comment))
        except Exception as e:
            print(str(e))
        print("Comment: ", getquote.comment)
        print("")
    elif sub == 'funny':
        getquote = session.query(FunnyStuff).order_by(func.rand()).first()
        print(getquote.id)
        try:
            post.reply(str(getquote.comment))
        except Exception as e:
            print(str(e))
        print("Comment: ", getquote.comment)
    else:
        print("No Sub Found")
def get_stale_publications_ids(self, number, days_old=7):
    # First, get the publications that have never been updated.
    q = self.session.query(Publication)
    q = q.filter(Publication.scholar_id != None, Publication.retrieved_at == None)
    q = q.options(load_only('scholar_id'))
    q = q.order_by(func.rand()).limit(number)
    publications = q.all()
    # Add stale publications (that haven't been updated for
    # a given amount of time).
    if len(publications) < number:
        rest = number - len(publications)
        q2 = self.session.query(Publication)
        q2 = q2.filter(
            Publication.scholar_id != None,
            Publication.retrieved_at < func.subdate(func.curdate(), days_old))
        q2 = q2.options(load_only('scholar_id'))
        q2 = q2.order_by(func.rand()).limit(rest)
        publications.extend(q2.all())
    return [p.scholar_id for p in publications]
def post(self):
    form = AddTestPagerForm(request.form)
    if form.validate():
        # Receive the form data from the front end
        choiceSize = int(request.form["choiceSize"])
        choiceScore = int(request.form["choiceScore"])
        shortAnswerSize = int(request.form["shortAnswerSize"])
        shortAnswerScore = int(request.form["shortAnswerScore"])
        programSize = int(request.form["programSize"])
        programScore = int(request.form["programScore"])
        type = request.form["type"]
        name = request.form['name'].strip()
        # Total score = number of choice questions * score per choice question
        # + number of short-answer questions * score per short-answer question
        # + number of programming questions * score per programming question
        totalscore = (choiceSize * choiceScore) + (shortAnswerSize * shortAnswerScore) + (programSize * programScore)
        count_choice = db_session.query(Choice).filter(Choice.choice_type == type).count()
        count_short_answer = db_session.query(ShortAnswer).filter(ShortAnswer.short_answer_type == type).count()
        count_program = db_session.query(Program).filter(Program.program_type == type).count()
        # If the bank has fewer choice questions than requested
        if count_choice < choiceSize:
            # pull out all of them
            choices = db_session.query(Choice).all()
            message1 = 'The bank has fewer than %s choice questions; all of them have been used' % choiceSize
        # Otherwise sample the requested number at random
        else:
            choices = db_session.query(Choice).order_by(func.rand(Choice.choice_id))[0:choiceSize]
        if count_short_answer < shortAnswerSize:
            short_answers = db_session.query(ShortAnswer).all()
            message2 = 'The bank has fewer than %s short-answer questions; all of them have been used' % shortAnswerSize
        else:
            short_answers = db_session.query(ShortAnswer).order_by(func.rand(ShortAnswer.short_answer_id))[0:shortAnswerSize]
        if count_program < programSize:
            programs = db_session.query(Program).all()
            message3 = 'The bank has fewer than %s programming questions; all of them have been used' % programSize
        else:
            programs = db_session.query(Program).order_by(func.rand(Program.program_id))[0:programSize]
        # Create the test paper object
        test_pager = TestPager(test_pager_name=name,
                               test_pager_type=type,
                               test_pager_choice_score=choiceScore,
                               test_pager_short_answer_score=shortAnswerScore,
                               test_pager_program_score=programScore,
                               test_pager_choice_num=choiceSize,
                               test_pager_short_answer_num=shortAnswerSize,
                               test_pager_program_num=programSize,
                               test_pager_total_score=totalscore)
        # Attach the choice, short-answer, and programming questions to the paper
        test_pager.test_pager_choices.extend(choices)
        test_pager.test_pager_short_answers.extend(short_answers)
        test_pager.test_pager_programs.extend(programs)
        db_session.add(test_pager)
        db_session.commit()
        content = {
            # 'message1': message1,
            # 'message2': message2,
            # 'message3': message3,
            'totalScore': totalscore
        }
        return render_template('front/front_add_test_page.html', **content)
    else:
        message = form.errors.popitem()[1][0]
        return render_template('front/front_add_test_page.html', message=message)
def get_random_prize(cls, connection):
    stmt = connection.query(Prize) \
        .filter(Prize.user_id.is_(None)) \
        .filter(Prize.probability == 1) \
        .order_by(func.rand()).limit(1).scalar()
    if stmt is None:
        stmt = connection.query(Prize) \
            .filter(Prize.user_id.is_(None)) \
            .order_by(func.rand()).limit(1).scalar()
    return stmt
def index(self, **kw):
    # items = DBSession.query(Item).all()
    # my_page = Page(items, page=int(kw.get("page", 1)),
    #                url=lambda page: "%s?page=%d" % (self.request.path, page))
    # self.render("index.html", my_page=my_page)
    items = DBSession.query(Item).all()[:9]
    try:
        show_case1 = DBSession.query(Item).order_by(func.random()).all()[:4]
        show_case2 = DBSession.query(Item).order_by(func.random()).all()[:4]
    except Exception:
        show_case1 = DBSession.query(Item).order_by(func.rand()).all()[:4]
        show_case2 = DBSession.query(Item).order_by(func.rand()).all()[:4]
    self.render("index.html", items=items, show_case1=show_case1, show_case2=show_case2)
def posts(count=50):
    count = int(count)
    for i in range(count):
        author_id = User.query.order_by(func.rand()).limit(1).first().id
        board_id = BoardModel.query.order_by(func.rand()).limit(1).first().id
        p = PostModel(title=fake.paragraph(1),
                      content=fake.text(),
                      create_time=fake.past_datetime(),
                      author_id=author_id,
                      board_id=board_id)
        db.session.add(p)
        db.session.commit()
    return 'Post data generated successfully!'
def get_word_random(self, db: Session) -> Word:
    if engine.name in ("sqlite", "postgresql"):
        return db.query(Word).order_by(func.random()).first()
    elif engine.name == "mysql":
        return db.query(Word).order_by(func.rand()).first()
    elif engine.name == "oracle":
        return db.query(Word).order_by("dbms_random.value").first()
def category():
    """Category page."""
    genre_id = request.args.get('genre_id', 0, int)
    page = request.args.get('page', 1, int)
    cate_id = request.args.get('cate_id', 1, int)
    works = Work.query.filter_by(cate_id=cate_id)
    # Sort genres by the number of works in each genre
    from sqlalchemy import func, desc
    genres = [genre for genre, count in
              db.session.query(Genre, func.count(work_genres.c.work_id).label('total'))
              .filter_by(cate_id=cate_id)
              .join(work_genres)
              .group_by(Genre)
              .order_by(desc('total'))
              .all()]
    if genre_id:
        if genre_id == -1:
            # random order
            works = works.order_by(func.rand())
        else:
            # use join
            # works = works.join(Genre.works).filter(Genre.id == genre_id)
            works = works.filter(Work.genres.any(id=genre_id)).order_by(Work.created.desc())
    else:
        works = works.order_by(Work.created.desc())
    # newest_comments = Comment.query.order_by(Comment.created.desc()).limit(5)
    newest_comments = Comment.query.join(Work).filter_by(cate_id=cate_id).order_by(Comment.created.desc()).limit(5)
    total = Recommendation.query.count()
    success = Recommendation.query.filter(Recommendation.status_id == 3).count()
    failure = Recommendation.query.filter(Recommendation.status_id == 4).count()
    works = works.paginate(page, current_app.config['FLASK_WORKS_PER_PAGE'], error_out=True)
    return render_template('site/category.html', works=works,
                           newest_comments=newest_comments,
                           current_time=datetime.utcnow(),
                           total=total, success=success, failure=failure,
                           genres=genres, genre_id=genre_id, page=page,
                           cate_id=cate_id)
def pick_facts():
    conn = engine.connect()
    s = select([facts]).order_by(func.rand())
    r = conn.execute(s)
    row = r.fetchone()
    result = row['content']
    return result
def modeledFieldGroupByCategoryAndCount(queryObject, columnName, PcolumnName,
                                        metaseek_power=0.9, sampleRate=0.2, numCats=False):
    columnObject = getattr(Dataset, columnName)
    columnObjectP = getattr(Dataset, PcolumnName)
    query = (
        queryObject.with_entities(columnObject)  # choose only the column we care about
        .filter(columnObjectP >= metaseek_power)  # subset only those rows with P value above threshold
        .filter(func.rand() < sampleRate)  # grab random sample of rows
        .add_columns(func.count(1).label('count'))  # add count to response
        .group_by(columnName)  # group by
        .order_by(desc('count'))  # order by the largest first
    )
    # If no numCats is passed in, show all the groups
    if numCats:
        query = query.limit(numCats)  # show the top N results
    # TODO maybe: count 'other column' if numCats, where sum counts all but top numCats fields
    return dict(
        (key, val * (1 / sampleRate))  # rescale sampled counts to approximate the full dataset
        for key, val in query.all()  # actually run the query
    )
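# A quick check of the arithmetic behind the rescaling above (the numbers in
# this sketch are hypothetical, used only to illustrate the idea): filtering
# on func.rand() < sampleRate keeps roughly a sampleRate fraction of the
# rows, so each sampled group count is multiplied by 1 / sampleRate to
# estimate the full-table count.
#
#     sampleRate = 0.2
#     sampled = {"amplicon": 412, "shotgun": 97}
#     estimated = {k: v * (1 / sampleRate) for k, v in sampled.items()}
#     # -> {'amplicon': 2060.0, 'shotgun': 485.0}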
def get(self):
    # TODO:
    wishes = self.session.query(models.Wish).filter_by(is_public=1).order_by(func.rand())[0:19]
    self.arg.update({'wishes': wishes})
    self.render('wish-pool.html', arg=self.arg)
def main():
    user = session2.query(Bots).filter(Bots.client_id != "").order_by(
        func.rand()).first()
    print("")
    print("")
    print("*" * 10)
    print("User: "******"Title: ", s.title)
    addcomment(post=s, sub=subz)
    currentcount = user.post_count
    newcount = int(currentcount) + 1
    user.post_count = newcount
    print("new user post count: ", newcount)
    session2.add(user)
    session2.commit()
    print("")
    time.sleep(500)
def random(self, n=1, dialect=None):
    """Returns n random model instances.

    :param n: the number of instances to return
    :type n: int
    :param dialect: the engine dialect (the implementation of random
        differs between MySQL and SQLite among others). By default will
        look up on the query for the dialect used. If no random function
        is available for the chosen dialect, the fallback implementation
        uses total row count to generate random offsets.
    :type dialect: str
    :rtype: model instances
    """
    if dialect is None:
        conn = self._connection_from_session()
        dialect = conn.dialect.name
    if dialect == 'mysql':
        rv = self.order_by(func.rand()).limit(n).all()
    elif dialect in ['sqlite', 'postgresql']:
        rv = self.order_by(func.random()).limit(n).all()
    else:
        # fallback implementation
        count = self.count()
        rv = [self.offset(randint(0, count - 1)).first() for _ in range(n)]
    if len(rv) == 1:
        return rv[0]
    return rv
def random(self, n_instances=1, dialect=None):
    """Returns random model instances.

    :param n_instances: the number of instances to return
    :type n_instances: int
    :param dialect: the engine dialect (the implementation of random
        differs between MySQL and SQLite among others). By default will
        look up on the query for the dialect used. If no random function
        is available for the chosen dialect, the fallback implementation
        uses total row count to generate random offsets.
    :type dialect: str
    :rtype: model instances
    """
    if dialect is None:
        dialect = self.session.get_bind().dialect.name
    if dialect == 'mysql':
        instances = self.order_by(func.rand()).limit(n_instances).all()
    elif dialect in ['sqlite', 'postgresql']:
        instances = self.order_by(func.random()).limit(n_instances).all()
    else:
        # fallback implementation
        count = self.count()
        instances = [
            self.offset(randint(0, count - 1)).first()
            for _ in range(n_instances)
        ]
    if len(instances) == 1:
        return instances[0]
    return instances
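# A minimal usage sketch for the random() query helper above, assuming it is
# mixed into a Flask-SQLAlchemy style query class (the User model here is
# hypothetical):
#
#     one_user = User.query.random()                   # single instance
#     three_users = User.query.random(n_instances=3)   # list of instances
#
# The MySQL branch uses func.rand() while SQLite/PostgreSQL use
# func.random(); the fallback issues one OFFSET query per requested instance,
# which works on any backend but may return duplicate rows and costs one
# round trip per instance.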
def get(self):
    admin = g.admin
    if admin:
        uncheckUser = User.query.filter_by(lastStatus=2).order_by(func.rand()).first()
        if uncheckUser:
            try:
                uncheckEvent = Event.query.filter_by(
                    username=uncheckUser.username
                ).order_by(Event.time.desc()).first()
                uncheckUser.lastStatus = 3
                return jsonify({
                    'id': uncheckEvent.id,
                    'originPhotoUrl': systemPhotoToUrl(uncheckUser.photoName),
                    'eventPhotoUrl': uploadPhotoToUrl(uncheckEvent.uploadPhotoName),
                    "code": 200
                })
            except Exception:
                return jsonify({"code": 500})
        else:
            return jsonify({"code": 204})
    else:
        return jsonify({"code": 401})
def on_get(self, req, resp, *args, **kwargs):
    super(ResourceGetRandomQuestion, self).on_get(req, resp, *args, **kwargs)
    category_filter = req.get_param("category", False)
    if category_filter is not None:
        if category_filter not in [i.value for i in CategoryEnum.__members__.values()]:
            raise falcon.HTTPInvalidParam(messages.event_status_invalid, "category")
    query = self.db_session.query(Question).order_by(func.rand())
    if category_filter is not None:
        query = query.filter(Question.category == CategoryEnum(category_filter))
    question = query.first()
    print(question.json_model)
    query = self.db_session.query(Answer, AnswerQuestionAssiation.is_correct) \
        .join(AnswerQuestionAssiation) \
        .filter(question.id == AnswerQuestionAssiation.id_question)
    answers = query.all()
    response = question.json_model
    response["answers"] = []
    for a in answers:
        aux = a[0].json_model
        aux["is_correct"] = a[1]
        response["answers"].append(aux)
    resp.media = response
    resp.status = falcon.HTTP_200
def get_candidate(song_id):
    # For current_user.id:
    # look up songs that have at least 2 versions and choose one;
    # then iterate through the rest of the versions, looking for a pair
    # that current_user.id has not already rated, and present the first
    # pair found. This logic is as dumb as can be at present (poor performance).
    try:
        user_id = current_user.id if current_user else 0
    except AttributeError:
        user_id = 0
    versions = db.session.query(
        Version.version_id, Version.song_id, Version.date, Version.url,
        Song.name, Venue.name.label('venue_name'), Venue.location
    ) \
        .join(Song, Song.phishin_id == Version.song_id) \
        .join(Show, Show.phishin_id == Version.show_id) \
        .join(Venue, Show.venue_id == Venue.phishin_id) \
        .filter(Version.song_id == song_id).order_by(func.rand()).all()
    # if versions:
    #     print(versions)
    for lhs in versions:
        other_versions = list(versions)
        shuffle(other_versions)
        for rhs in other_versions:
            if lhs != rhs:
                if db.session.query(Vote).filter(
                    and_(Vote.created_by == user_id,
                         or_(and_(Vote.lhs == lhs.version_id, Vote.rhs == rhs.version_id),
                             and_(Vote.lhs == rhs.version_id, Vote.rhs == lhs.version_id)))
                ).count() == 0:
                    # user hasn't voted on this pair
                    return lhs, rhs
    return None, None
def batch_crawl_publications():
    """
    Crawls Google Scholar in order to retrieve information about
    publications in batches.
    """
    FETCH_LIMIT = 1000
    SLEEP_TIME = 10
    while True:
        publications = Publication.query.filter(
            Publication.scholar_id != None,
            Publication.retrieved_at == None
        ).options(load_only('scholar_id')).order_by(func.rand()).limit(FETCH_LIMIT).all()
        count = 0
        for publication in publications:
            url = url_for('crawl_publication', _external=True)
            data = urlencode({'scholar_id': publication.scholar_id})
            req = Request(url, data)
            res = urlopen(req, timeout=30)
            count += 1
            time.sleep(SLEEP_TIME)
        print('Crawled ' + str(count) + ' publications.')
        if count == 0:
            break
    return 'Done.'
def get_quotas():
    """
    Fetch short quotes: nine quotes plus one ad.
    :return:
    """
    form = GetQuotasForm().validate_for_api()
    per_page = 9
    # Fetch quotes in random order
    if form.order_by.data == 'random':
        if form.category.data != '全部':
            category = Category.query.filter_by(
                content=form.category.data).first_or_404()
            items = Quota.query.with_parent(category).filter_by(
                delete_time=None).order_by(func.rand()).limit(9).all()
        else:
            items = Quota.query.filter_by(delete_time=None).order_by(
                func.rand()).limit(9).all()
        # Shape the quotes into a pagination-like structure
        quotas = {
            'items': items,
            'has_next': True,
            'page': form.page.data,
            'next_page': form.page.data + 1
        }
    # Fetch quotes by publish time, newest first
    elif form.order_by.data == 'desc':
        if form.category.data != '全部':
            category = Category.query.filter_by(
                content=form.category.data).first_or_404()
            # If a category parameter is given, query by category
            quotas = Quota.query.with_parent(category).filter_by(
                delete_time=None).order_by(Quota._create_time.desc()).paginate(
                    form.page.data, per_page)
        else:
            quotas = Quota.query.filter_by(delete_time=None).order_by(
                Quota._create_time.desc()).paginate(form.page.data, per_page)
        quotas = json_paginate(quotas)
    else:
        raise ParameterException()
    ad = Ad.query.filter_by(delete_time=None).order_by(
        func.rand()).limit(1).first()
    if ad is not None:
        quotas['items'].append(ad)
    return jsonify(quotas)
def get_rand_xjj_for_tg(self, style, district):
    print("Start randomly picking one listing for the Telegram user")
    rs = Xjj.query.filter(
        Xjj.style.like("%" + style + "%"),
        Xjj.district.like("%" + district + "%"),
    ).order_by(func.rand()).first()
    print("Finished querying listings")
    return rs
def random_products(s, start=0, finish=20):
    # Try each dialect's random function in turn. Note that order_by() alone
    # does not execute the query, so a dialect mismatch only surfaces once
    # the returned query is actually iterated.
    order_by_list = [func.random(), func.rand(), 'dbms_random.value']
    for o in order_by_list:
        try:
            return s.query(Product).order_by(o).offset(0).limit(finish)
        except Exception:
            pass
    return []
def query_db(self):
    try:
        self.session = self.Session()
        self.records = self.session.query(Model).order_by(
            func.rand()).yield_per(10000)
        # for record in records:
        #     yield record
    except Exception as x:
        logger.error(x)
def discovery():
    # recipes = rec.query.all()
    # posts = postss.query.all()
    obj = rec.query.count()
    recipes = rec.query.order_by(func.rand()).first()
    recuser = users.query.get(recipes.user_id)
    print(recipes.dateposted)
    formsearch = RecipeSearchForm()
    return render_template('discovery.html', rec=recipes, recuser=recuser, form5=formsearch)
def get_rand_data():
    # id type: int
    # rtype: tuple
    target = session.query(English).filter(English.used != True).order_by(
        func.rand()).first()
    result = (target.id, target.word, target.symbol, target.translate,
              target.en, target.zh)
    target.used = True
    session.commit()
    return result
def DebugSignRandomPeopleIn(self, howmany):
    for person in self.GetPeopleInShop():
        self.SignPersonOut(person)
    people = db.session.query(Person) \
        .order_by(func.rand()) \
        .limit(howmany).all()
    for person in people:
        self.SignPersonIn(person, "shoptime")
def get_random_passage_id():
    """
    Get the id of a random passage present in the db.
    :return: id of the passage
    :rtype: int
    """
    passage = Passage.query.order_by(func.rand()).first()
    if passage:
        return passage.id
    return passage
def get_random_phish_song(exclude=None):
    song_id = db.session.query(Song.phishin_id) \
        .join(Band, Band.band_id == Song.band_id) \
        .filter(Band.name == 'Phish')
    if exclude is not None:
        song_id = song_id.filter(Song.phishin_id != exclude)
    song_id = song_id.order_by(func.rand()).first().phishin_id
    return song_id
def question_set_config_gen(config):
    # TODO: need to separate the time parsing
    start_time_str = config.get('start_time')
    duration_str = config.get('duration')
    question_set_config = config.get('problem_set_config')

    # time calculation
    start_time = datetime.datetime.strptime(start_time_str, '%Y/%m/%d %H:%M:%S')
    duration = datetime.datetime.strptime(duration_str, '%H:%M:%S')
    duration = datetime.timedelta(hours=duration.hour,
                                  minutes=duration.minute,
                                  seconds=duration.second)
    end_time = start_time + duration

    # problem set generation
    score_config = []
    problem_set = {'select': [], 'fill': [], 'fix': [], 'coding': []}
    s = g.Session()
    try:
        for p_conf in question_set_config:
            t = p_conf.get('type')
            count = p_conf.get('number')
            l.debug("CONF:" + str(p_conf))
            if count < 0:
                l.error('the configuration setting is malformed.')
                raise Exception
            score_per = p_conf.get('percentage_tatol')
            score_config.append({'type': t, 'percentage': score_per})
            q = s.query(db.Questions).filter(db.Questions.question_type == t)
            db_count = q.count()
            if count > db_count:
                l.error('the number of ' + t + ' problems is not enough: '
                        'current ' + str(db_count) + ', need ' + str(count) + '.')
                raise Exception
            problems = q.order_by(func.rand()).limit(count).all()
            for problem in problems:
                problem_set[t].append(problem.id)
    except Exception as e:
        l.error(e)
    finally:
        s.close()
    configure = {
        'start_time': start_time,
        'end_time': end_time,
        'question_set': problem_set,
        'score_config': score_config
    }
    return configure
def get_random_recording(self, model):
    try:
        from sqlalchemy import func
        return self.db.query(Recording) \
            .filter(Recording.model == model) \
            .filter(Recording.score < 0.8) \
            .order_by(func.rand()) \
            .limit(1) \
            .one()
    except Exception:
        return None
def random(self):
    engine = str(db.engine)
    if 'postgresql' in engine:
        # Postgres
        return Session.query(self).order_by(func.random()).first()
    elif 'mysql' in engine:
        # MySQL
        return Session.query(self).order_by(func.rand()).first()
    elif 'sqlite' in engine:
        # sqlite
        return Session.query(self).order_by(func.random()).first()
def get_number(cls, open_id):
    """
    Assign a lottery number.
    :param open_id:
    """
    award = cls.query.filter(cls.open_id.is_(None)).order_by(
        func.rand()).first()
    if award:
        award.open_id = open_id
        db.session.commit()
    return award
def get_order_by(self, query):
    engine_name = query.session.get_bind().name
    if engine_name in ['sqlite', 'postgresql']:
        return func.random()
    elif engine_name == 'mysql':
        return func.rand()
    elif engine_name == 'oracle':
        return 'dbms_random.value'
    else:
        raise UnsupportedEngine(
            '{engine_name} engine does not support random ordering.'.format(**locals())
        )
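# A hedged usage sketch for get_order_by() above (the query variable is
# illustrative, not from the original):
#
#     clause = self.get_order_by(query)
#     row = query.order_by(clause).first()
#
# The Oracle branch returns the plain string 'dbms_random.value' because
# there is no func.* spelling that renders the package-qualified
# DBMS_RANDOM.VALUE; note that recent SQLAlchemy versions require wrapping
# arbitrary SQL strings in text() before passing them to order_by().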
def select_random(self, guild_id):
    with self.processing:
        session = self.session_creator()
        now = datetime.datetime.now()
        last_week = now - datetime.timedelta(days=7)
        sub_query = session.query(
            func.distinct(Message.user_id).label("user_id")
        ).with_hint(
            Message, "USE INDEX(timestamp)"
        ).filter(
            Message.timestamp > last_week,
            Message.guild_id == guild_id
        ).subquery()
        query = session.query(sub_query.c.user_id).order_by(func.rand()).limit(1)
        results = query.all()
        self.session_creator.remove()
        return results[0][0]
def get_random_public_quizzes(quizzes_number):
    quizzes = Quiz.query.filter_by(permission='public').order_by(
        func.rand()).limit(quizzes_number).all()
    for quiz in quizzes:
        questions = Question.get_active_questions_with_revisions_by_quiz_id(quiz.qid)
        if questions:
            quiz.latitude = questions[0].question_revision.latitude
            quiz.longitude = questions[0].question_revision.longitude
        else:
            quiz.latitude = 37.4419
            quiz.longitude = -122.1419
        quiz.questions = questions
        quiz.user = User.get_user_by_id(quiz.user_id)
    return quizzes
def tracks(self, sort=None, filter=None, page=None):
    if sort is None:
        sort = "created"
    if filter is None:
        filter = "none"
    if page is None:
        page = 1
    page = int(page)
    page_size = 70
    offset = page_size * (page - 1)

    query = get_database().query(Track).filter(Track.scanned).group_by(Track.id)

    if sort == "created":
        query = query.order_by(Track.created.desc())
    elif sort == "updated":
        query = query.order_by(Track.updated.desc())
    elif sort == "random":
        query = query.order_by(func.rand())
        page = None

    if filter == "woartist":
        query = query.filter("artist_id is null")
    elif filter == "woalbum":
        query = query.filter("album_id is null")
    elif filter == "invalid":
        query = query.filter("invalid is not null")
    elif filter == "duplicates":
        query = (query.join(TrackPath, Track.id == TrackPath.track_id)
                 .having(func.count(distinct(TrackPath.id)) > 1))

    total = query.count()
    pages = math.ceil(total / page_size)

    tracks = query.limit(page_size).offset(offset).all()

    return {
        'tracks': tracks,
        'page': page,
        'page_size': page_size,
        'total': total,
        'pages': pages,
        'sort': sort,
        'filter': filter
    }
def refresh_authors():
    """
    Schedules some of the out-dated authors to be refreshed.
    """
    OUT_DATE_LIMIT = 30
    FETCH_LIMIT = 10000
    authors = Author.query.filter(
        Author.scholar_id != None,
        func.rand() < P,
        or_(Author.retrieved_at == None,
            func.now() > func.adddate(Author.retrieved_at, OUT_DATE_LIMIT))
    ).options(load_only('scholar_id')).limit(FETCH_LIMIT).all()
    count = 0
    for author in authors:
        queue = taskqueue.Queue('author-fetchers')
        task = taskqueue.Task(url='/author/crawl',
                              params={'scholar_id': author.scholar_id})
        queue.add(task)
        count += 1
    print('Had ' + str(count) + ' authors refreshed.')
    return 'Refreshed.'
def refresh_publications():
    """
    Schedules some of the out-dated publications to be refreshed.
    """
    OUT_DATE_LIMIT = 180
    FETCH_LIMIT = 1000
    publications = Publication.query.filter(
        Publication.scholar_id != None,
        func.rand() < P,
        or_(Publication.retrieved_at == None,
            func.now() > func.adddate(Publication.retrieved_at, OUT_DATE_LIMIT))
    ).options(load_only('scholar_id')).limit(FETCH_LIMIT).all()
    count = 0
    for publication in publications:
        queue = taskqueue.Queue('publication-fetchers')
        task = taskqueue.Task(url='/publication/crawl',
                              params={'scholar_id': publication.scholar_id})
        queue.add(task)
        count += 1
    print('Had ' + str(count) + ' publications refreshed.')
    return 'Refreshed.'
def filter_query(query, dirty=False, starting_year=None, ending_year=None,
                 sample_size=None, model=None):
    filtered = query
    if not dirty:
        filtered = query.filter(Grant.clean == True)
    if ending_year is not None:
        filtered = filtered.filter(Grant.published_year <= ending_year)
    if starting_year is not None:
        filtered = filtered.filter(Grant.published_year >= starting_year)
    if model is not None:
        documents_in_set = filtered.count()
        model.documents_in_set = documents_in_set
        set_status("%d documents met filtering criteria" % documents_in_set)
    if sample_size is not None:
        filtered = filtered.order_by(func.rand()).limit(sample_size)
        if model is not None:
            documents_sampled = filtered.count()
            model.documents_sampled = documents_sampled
            set_status("%d documents were sampled" % documents_sampled)
    return filtered
class Quote(db.Model):
    __table_args__ = {'mysql_engine': 'InnoDB', 'sqlite_autoincrement': True}
    id = Column(Integer, nullable=False, primary_key=True)
    body = Column(Text, nullable=False)
    notes = Column(Text, nullable=True)
    rating = Column(Integer, nullable=False, default=0)
    votes = Column(Integer, nullable=False, default=0)
    submitted = Column(DateTime, nullable=False, default=now)
    status = Column(Integer, nullable=False, default=0)
    score = Column(DOUBLE(unsigned=True), nullable=False, default=1)
    tags = relationship("Tag", secondary=QuoteToTag)
    submitted_by = relationship("User", secondary=QuoteToUser, uselist=False)
    voters = relationship("VoteToUser")

    @property
    def upvotes(self):
        return len([v for v in self.voters if v.direction == 'up'])

    @property
    def downvotes(self):
        return len([v for v in self.voters if v.direction == 'down'])

AREA_ORDER_MAP = {
    'best': [Quote.rating.desc()],
    'worst': [Quote.rating],
    'random': [func.rand()],
    'controversial': [Quote.votes, Quote.rating / Quote.votes]
}
DEFAULT_ORDER = [Quote.submitted.desc()]
def get_random_artifact():
    return Artifact.query.order_by(func.rand()).first()
def test_index(self):
    ## Requires login
    res = self.test_client.get("/goals/users/0/")
    setup.assertRequiresLogin(self, res)

    ## 404s on bum user
    self.login()
    res = self.test_client.get("/goals/users/0/")
    setup.assert404(self, res)

    # Empty if user has no goals
    user_id = str(json.loads(self.test_client.get("/users/me/").data)['id'])
    res = self.test_client.get("/goals/users/" + user_id + "/")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 0)

    ## Shows only public goals even for current user
    setup.create_test_goals()
    res = self.test_client.get("/goals/users/" + user_id + "/")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 0)  # No public goals

    public_goals = db.session.query(Goal).all()
    for goal in public_goals:
        goal.update({"public": True})
    res = self.test_client.get("/goals/users/" + user_id + "/")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 20)  # default count is 20
    name_appendix = ord('a')
    is_public = False  # test to make sure at least one is public
    is_private = False  # none should be private
    for goal in data['goals']:
        if goal['public']:
            is_public = True
        else:
            is_private = True
        self.assertTrue(goal['active'])  # by default they're all active
        self.assertEqual(goal['name'], "test goal " + chr(name_appendix))  # by default sort by name ASC
        name_appendix = name_appendix + 1
    self.assertTrue(is_public)
    self.assertFalse(is_private)

    ## Only shows public goals for other users
    self.logout()
    self.login_other_user()
    res = self.test_client.get("/goals/users/" + user_id + "/")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 20)  # default count is 20
    name_appendix = ord('a')
    is_public = False  # test to make sure at least one is public
    is_private = False  # none should be private
    for goal in data['goals']:
        if goal['public']:
            is_public = True
        else:
            is_private = True
        self.assertTrue(goal['active'])  # by default they're all active
        self.assertEqual(goal['name'], "test goal " + chr(name_appendix))  # by default sort by name ASC
        name_appendix = name_appendix + 1
    self.assertTrue(is_public)
    self.assertFalse(is_private)

    public_goals = db.session.query(Goal).all()
    for goal in public_goals:
        goal.update({"public": False})
    res = self.test_client.get("/goals/users/" + user_id + "/")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 0)  # No public goals

    ## Count works
    public_goals = db.session.query(Goal).all()
    for goal in public_goals:
        goal.update({"public": True})
    res = self.test_client.get("/goals/users/" + user_id + "/?count=3")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 3)
    name_appendix = chr(ord('a') - 1)
    for goal in data['goals']:
        self.assertTrue(goal['active'])  # by default they're all active
        new_appendix = goal['name'].replace("test goal ", "")
        self.assertTrue(ord(new_appendix) > ord(name_appendix))
        name_appendix = new_appendix

    ## Sort rejects invalid inputs
    res = self.test_client.get("/goals/users/" + user_id + "/?sort=banana")
    setup.assertBadData(self, res, "sort can only be one of")

    ## Sort works -- and it should always give active first, then inactive
    active_goals = db.session.query(Goal).order_by(func.rand()).limit(15)
    for goal in active_goals:
        goal.update({"active": False})
    res = self.test_client.get("/goals/users/" + user_id + "/?sort=created")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 20)  # default count is 20
    is_active = False
    is_inactive = False
    test_created = str(datetime.datetime(1970, 1, 1, 0, 0, 0))
    for goal in data['goals']:
        if goal['active']:
            is_active = True
        if not goal['active']:
            if not is_inactive:
                test_created = str(datetime.datetime(1970, 1, 1, 0, 0, 0))  # reset the sort on the first inactive goal
            is_inactive = True
        self.assertTrue(goal['created'] > test_created)
        test_created = goal['created']
    self.assertTrue(is_active)
    self.assertTrue(is_inactive)

    ## Sort order rejects invalid inputs
    res = self.test_client.get("/goals/users/" + user_id + "/?sort_order=banana")
    setup.assertBadData(self, res, "sort_order must be either")

    ## Sort_order works
    res = self.test_client.get("/goals/users/" + user_id + "/?sort=created&sort_order=desc")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 20)  # default count is 20
    is_active = False
    is_inactive = False
    test_created = str(datetime.datetime(2100, 1, 1, 0, 0, 0))
    for goal in data['goals']:
        if goal['active']:
            is_active = True
        if not goal['active']:
            if not is_inactive:
                test_created = str(datetime.datetime(2100, 1, 1, 0, 0, 0))  # reset the sort on the first inactive goal
            is_inactive = True
        self.assertTrue(goal['created'] < test_created)
        test_created = goal['created']
    self.assertTrue(is_active)
    self.assertTrue(is_inactive)

    ## Offset works
    min_name = db.session.query(Goal).filter_by(active=True).order_by(Goal.name).first().name
    res = self.test_client.get("/goals/users/" + user_id + "/?offset=1")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 20)  # default count is 20
    self.assertTrue(data['goals'][0]['name'] > min_name)

    ## Count, sort, sort order, and offset all work together
    max_created = db.session.query(Goal).filter_by(active=True).order_by(Goal.created.desc()).first().created
    res = self.test_client.get("/goals/users/" + user_id + "/?offset=1&count=1&sort=created&sort_order=desc")
    setup.assertOk(self, res)
    data = json.loads(res.data)
    self.assertIn("goals", data)
    self.assertEqual(len(data['goals']), 1)
    self.assertTrue(data['goals'][0]['created'] < str(max_created))
class PasswordResets(db.Model):
    __tablename__ = 'password_resets'
    user_id = Column(Integer, ForeignKey('users.id'), primary_key=True)
    key = Column(String(26), nullable=False)
    created = Column(DateTime, nullable=False, default=now)

class Quote(db.Model):
    __tablename__ = 'quotes'
    __table_args__ = {'mysql_engine': 'InnoDB', 'sqlite_autoincrement': True}
    id = Column(Integer, nullable=False, primary_key=True)
    body = Column(Text, nullable=False)
    notes = Column(Text, nullable=True)
    rating = Column(Integer, nullable=False, default=0)
    votes = Column(Integer, nullable=False, default=0)
    submitted = Column(DateTime, nullable=False, default=now)
    status = Column(Integer, nullable=False, default=0)
    score = Column(DOUBLE(unsigned=True), nullable=False, default=1)
    tags = relationship("Tag", secondary=QuoteToTag)
    submitted_by = relationship("User", secondary=QuoteToUser, uselist=False)
    voters = relationship("VoteToUser")

AREA_ORDER_MAP = {
    'best': Quote.rating.desc(),
    'worst': Quote.rating,
    'random': func.rand()
}
DEFAULT_ORDER = Quote.submitted.desc()
def excute(self, page):
    times = datetime.datetime.now().strftime("%Y-%m-%d")
    # content = self.getContent('http://www.appgame.com/?json=get_date_posts&date=2016-04-14&count=2')
    if page == 1:
        content = self.getContent('http://www.appgame.com/?json=get_date_posts&date=' + times)
    else:
        content = self.getContent('http://www.appgame.com/?json=get_date_posts&date=' + times + '&page=' + str(page))
    content = json.loads(content)
    page_num = content['pages']
    for i in content['posts']:
        with self.app.app_context():
            urls = WpDataoptimumUrls.query.filter(WpDataoptimumUrls.url == i['url']).first()
            if urls is None:
                val = WpDataoptimumPlayContentAuto.query.filter(WpDataoptimumPlayContentAuto.url == i['url']).first()
                if val is None:
                    num = randint(2, 5)
                    # select users
                    list_user = WpPosts.query.outerjoin(
                        WpTermRelationships, WpPosts.ID == WpTermRelationships.object_id
                    ).filter(
                        WpTermRelationships.term_taxonomy_id.in_([152, 159, 161])
                    ).with_entities(
                        WpPosts.ID, 'post_title', 'post_type', WpTermRelationships.term_taxonomy_id
                    ).order_by(func.rand()).limit(num).all()
                    # select content
                    list_content = WpDataoptimumRecord.query.filter(
                        (WpDataoptimumRecord.category.like('%152%')) |
                        (WpDataoptimumRecord.category.like('%159%')) |
                        (WpDataoptimumRecord.category.like('%161%'))
                    ).order_by(func.rand()).limit(num).all()
                    lists = {}
                    if len(list_user) == len(list_content):
                        for index in range(num):
                            lists[index] = {"username": list_user[index].post_title,
                                            "content": list_content[index].comments}
                        # set times
                        list_time = []
                        startDate = datetime.datetime.now()
                        for x in list(self.random_date(startDate, num)):
                            list_time.append(x.strftime("%Y-%m-%d %H:%M:%S"))
                        for index2 in lists:
                            me = WpDataoptimumPlayContentAuto(lists[index2]['username'],
                                                              lists[index2]['content'],
                                                              i['url'], list_time[index2])
                            db.session.add(me)
                            db.session.commit()
    if page < page_num:
        page += 1
        self.excute(page)
# A typical guess for exp as (a, l)
guess = (1.0e-5, -5.)

def exp(x, a, l):
    """A simple function to use for curve fitting"""
    return a * np.exp(10.**l * x)

# Open a SQL session
session = Session()

# Query for the first output file of each simulation
q = session.query(Simulation).order_by(func.rand())

# Add additional filters as desired
q = q.filter(Simulation.compositional_stratif_param == -3.0)
q = q.filter(Simulation.z_extent_of_the_box == 25.0)

goal_tags = ["linear_growth", "linear_amplitude", "linear_kx", "linear_ky", "linear_kz"]

for sim in q.all():
    # Check if simulation results are already in the database
    tags = {tag.name: tag for tag in sim.tags}
    found = True
    for tag in goal_tags:
        if tag not in tags:
            found = False
def get_curated_products(category, page=1, num_per_page=10):
    session = get_a_session()
    page = int(page)
    num_per_page = int(num_per_page)
    products = session.query(SantaAmazonProduct) \
        .filter_by(curated=True) \
        .filter(or_(SantaAmazonProduct.keywords == category,
                    SantaAmazonProduct.keywords == 'both')) \
        .order_by(func.rand()) \
        .limit(num_per_page) \
        .all()
    return products
def artists(self, sort=None, filter=None, filter_value=None, page=None):
    if sort is None:
        sort = "created"
    if filter is None:
        filter = "none"
    if filter_value is None:
        filter_value = ""
    if page is None:
        page = 1
    page = int(page)
    page_size = 24
    offset = page_size * (page - 1)

    query = (get_database()
             .query(Artist)
             .join(Track, Artist.id == Track.artist_id)
             .filter(Track.scanned)
             .group_by(Artist.id))

    if sort == "created":
        query = query.order_by(Artist.created.desc())
    elif sort == "updated":
        query = query.order_by(Artist.updated.desc())
    elif sort == "random":
        query = query.order_by(func.rand())
        page = None

    if filter == "yours":
        remotes_user = remotes.get_user(cherrypy.request.user)
        artist_ids = []
        if remotes_user is not None and remotes_user['lastfm'] is not None:
            for artist in remotes_user['lastfm']['top_artists_overall']:
                artist_results = search.query_artist(artist['name'], exact=True)
                if len(artist_results) > 0:
                    artist_ids.append(artist_results[0].id)
        query = query.filter(Artist.id.in_(artist_ids))
    elif filter == "invalid":
        query = query.filter("invalid is not null")
    elif filter == "tag":
        artist_ids = []
        if filter_value != "":
            remotes.update_tag(filter_value)
            remotes_tag = remotes.get_tag(filter_value)
            if remotes_tag is not None and remotes_tag['lastfm'] is not None:
                for artist in remotes_tag['lastfm']['artists']:
                    artist_results = search.query_artist(artist['name'], exact=True)
                    if len(artist_results) > 0:
                        artist_ids.append(artist_results[0].id)
        query = query.filter(Artist.id.in_(artist_ids))

    total = query.count()
    pages = math.ceil(total / page_size)

    artists = query.limit(page_size).offset(offset).all()

    for artist in artists:
        remotes.update_artist(artist)

    return {
        'artists': artists,
        'page': page,
        'page_size': page_size,
        'total': total,
        'sort': sort,
        'filter': filter,
        'filter_value': filter_value,
        'pages': pages
    }
from models import *
from db_connect import db_connect
from sqlalchemy.orm import sessionmaker
from sqlalchemy import func
import smtplib
from email.mime.text import MIMEText

engine = db_connect('emailquotes', 'localhost')
Session = sessionmaker(bind=engine)
session = Session()

message_template = "This is your daily dose of quote, courtesy of the McMaier Expedition:\n\n"
sending_address = "*****@*****.**"

for user in session.query(User).all():
    # pick a random quote from the ones assigned to this user
    rand_quote = session.query(Quote).filter(Quote.send_user == user).order_by(func.rand()).first()
    # retrieve the to-address from the quote
    to_address = rand_quote.send_user.addresses[-1].email_address  # selects most recent address
    # construct the message body
    msg_body = message_template + rand_quote.message
    msg = MIMEText(msg_body)
    msg['Subject'] = 'Your daily dose of quote'
    msg['From'] = sending_address
    msg['To'] = to_address
    smtp_server = smtplib.SMTP('localhost')
    smtp_server.sendmail(sending_address, [to_address], msg.as_string())
    smtp_server.quit()
def numbers():
    """Gives a neat summary of champion data.

    Returns:
        json: JSON formatted champion statistic summary
    """
    # The most popular champion
    popular_champ = (
        db.session.query(ChampionData)
        .order_by(ChampionData.num_seen.desc())
        .first()
    )

    # Gets the most popular champions
    popular_champs = (
        db.session.query(ChampionData)
        .order_by(ChampionData.num_seen.desc())
        .limit(15)
        .all()
    )

    # Picks a random champion to analyze
    random_champ = (
        db.session.query(ChampionData)
        .order_by(func.rand())
        .first()
    )

    # Gets the champion that wins the most
    winning_champ = (
        db.session.query(ChampionData)
        .filter(ChampionData.num_seen > 10)
        .order_by(ChampionData.score.desc())
        .first()
    )

    # Gets the role of the champions who win the most
    winning_champ_roles = (
        db.session.query(
            Champion.role.label("role"),
            func.count(Champion.id).label("seen")
        )
        .filter(Champion.champion_id == winning_champ.champion_id)
        .group_by(Champion.role)
        .all()
    )

    # Stats, Date Stats, Case Study of Popular or Highest Win Rate
    stats = {
        'stats': {
            'match_count': Match.query.count(),
            'popular_champ': popular_champ.get_name(),
            'popular_champ_kda': round(popular_champ.get_kda(), 2),
            'random_champ': random_champ.get_name(),
            'random_champ_role': random_champ.role.capitalize(),
            'random_champ_seen': random_champ.num_seen,
            'average_kills': round(
                db.session.query(func.avg(ChampionData.kills)).first()[0], 2
            ),
            'average_towers': round(
                db.session.query(func.avg(ChampionData.tower_score)).first()[0], 2
            )
        },
        'champion_picks': {
            'labels': [
                champ.get_name() + " (" + champ.role.capitalize() + ")"
                for champ in popular_champs
            ],
            'data': [champ.num_seen for champ in popular_champs],
            'images': [champ.get_full_image() for champ in popular_champs]
        },
        # Time graph of pick rate over a week, group by date picked
        'winning_champ': {
            'name': winning_champ.get_name(),
            'role': winning_champ.role.capitalize(),
            'image': winning_champ.get_full_image(),
            'seen': winning_champ.num_seen,
            'won': winning_champ.won * 100,
            'assists': compile_sorted_champions(
                winning_champ.get_compiled_weights("assists")
            ),
            'kda': winning_champ.get_kda(),
            'role_distribution': {
                'labels': [
                    data.role.capitalize() for data in winning_champ_roles
                ],
                'data': [data.seen for data in winning_champ_roles]
            }
        }
    }

    return jsonify(stats)
def albums(self, view=None, sort=None, filter=None, filter_value=None, page=None):
    if view is None:
        view = "covers"
    if sort is None:
        sort = "created"
    if filter is None:
        filter = "none"
    if filter_value is None:
        filter_value = ""
    if page is None:
        page = 1
    page = int(page)
    page_size = 24
    offset = page_size * (page - 1)

    query = (get_database()
             .query(Album)
             .join(Track, Album.id == Track.album_id)
             .filter(Track.scanned)
             .group_by(Album.id))

    albums = []

    if filter == "yours":
        remotes_user = remotes.get_user(cherrypy.request.user)
        album_ids = []
        if remotes_user is not None and remotes_user['lastfm'] is not None:
            for album in remotes_user['lastfm']['top_albums_overall']:
                album_results = search.query_album(album['name'], exact=True)
                if len(album_results) > 0:
                    album_ids.append(album_results[0].id)
        query = query.filter(Album.id.in_(album_ids))
    elif filter == "1year":
        now = datetime.datetime.utcnow()
        query = query.filter(Album.created > now - datetime.timedelta(days=365))
    elif filter == "va":
        query = (query.join(Artist, Artist.id == Track.artist_id)
                 .having(func.count(distinct(Artist.id)) > 1))
    elif filter == "invalid":
        query = query.filter("invalid is not null")
    elif filter == "tag":
        album_ids = []
        if filter_value != "":
            remotes.update_tag(filter_value)
            remotes_tag = remotes.get_tag(filter_value)
            if remotes_tag is not None and remotes_tag['lastfm'] is not None:
                for album in remotes_tag['lastfm']['albums']:
                    album_results = search.query_album(album['name'], exact=True)
                    if len(album_results) > 0:
                        album_ids.append(album_results[0].id)
        query = query.filter(Album.id.in_(album_ids))

    # count before adding order_by() for performance reasons..
    total = query.count()
    pages = math.ceil(total / page_size)

    if sort == "created":
        query = query.order_by(Album.created.desc())
    elif sort == "updated":
        query = query.order_by(Album.updated.desc())
    elif sort == "seen":
        query = (query.outerjoin(UserAndAlbum,
                                 and_(Album.id == UserAndAlbum.album_id,
                                      UserAndAlbum.user_id == cherrypy.request.user.id))
                 .order_by(UserAndAlbum.seen.desc())
                 .order_by(Album.updated.desc()))
    elif sort == "date":
        query = (query
                 .order_by(Album.date.desc())
                 .order_by(Album.updated.desc()))
    elif sort == "random":
        query = query.order_by(func.rand())
        page = None

    albums = query.limit(page_size).offset(offset).all()

    for album in albums:
        remotes.update_album(album)
        for artist in album.artists:
            remotes.update_artist(artist)

    return {
        'albums': albums,
        'page': page,
        'page_size': page_size,
        'total': total,
        'sort': sort,
        'filter': filter,
        'filter_value': filter_value,
        'pages': pages,
        "view": view
    }
def zufaelliger_satz():
    try:
        satz = engine.execute(
            db_satz.select()
            .where((db_satz.c.tmp == False) & (db_satz.c.pro >= db_satz.c.kontra))
            .order_by(func.rand())
            .limit(1)
        ).fetchone()
        uid = satz.uid
    except Exception:
        print("Error: could not load a random sentence from the database.")
        uid = ''
    redirect('/' + uid)