def get_quiz_question():
    """Return one random question not yet played, optionally restricted
    to the requested quiz category.

    Expects a JSON body with 'quiz_category' ({'id': ...}) and
    'previous_questions' (list of question ids). Responds with
    {'success': True, 'question': <formatted question or False>}.
    Aborts 412 on PreconditionFailed, 500 on any other error.
    """
    print('\n\nGET quiz hit:')
    data = request.get_json()
    print(data)
    try:
        if data['quiz_category']['id']:
            questions = Question.query.filter(
                ~(Question.id.in_(data['previous_questions']))).filter(
                Question.category == data['quiz_category']['id']).order_by(
                func.random()).first()
        else:
            # Category id 0 / falsy means "all categories".
            questions = Question.query.filter(
                ~(Question.id.in_(data['previous_questions']))).order_by(
                func.random()).first()
        if questions is not None:
            selected_ques = questions.format()
        else:
            # Front end treats a falsy question as "quiz finished".
            selected_ques = False
            # abort(412) #this doesnt work on front end but i think it should have
        print(selected_ques)
        return jsonify({"success": True, 'question': selected_ques})
    except PreconditionFailed as e:
        print(sys.exc_info(), e)
        abort(412)
    except Exception as e:
        # BUG FIX: the original `except e:` referenced an undefined name
        # and would itself raise NameError; catch Exception explicitly.
        print(sys.exc_info(), e)
        abort(500)
def get_vocab():
    """Return a random meaning plus its associated words as JSON."""
    # Query-string flag controlling the optional single-word field.
    single_word_value = request.args.get("single_word_value")

    # Pick one meaning at random.
    rand_m = Meaning.query.order_by(func.random()).first()

    # Join every linked word into a comma-separated string.
    linked_words = Word.query.filter_by(link_id=rand_m.id).all()
    word_string = ", ".join(str(entry.w_text) for entry in linked_words)

    # Assemble the response payload.
    data = {}
    data["words"] = word_string
    data["meaning"] = rand_m.m_text

    # Optionally include one random word from the same meaning.
    if single_word_value == "true":
        random_word = Word.query.filter_by(link_id=rand_m.id).order_by(
            func.random()).first()
        data["single_word"] = random_word.w_text

    return jsonify(data)
def get_rand_pessoas(db: Session, qtde: dict) -> t.List[schemas.Pessoa]:
    '''
    Fetch random Pessoa rows based on their role.

    Input: dict {role: quantity}, where role is "aliado" or "colaborador".
    Output: list of Pessoa schemas.
    Raises: HTTPException 404 when a role is unknown or no people match.
    '''
    # BUG FIX: these were only assigned inside their matching branch, so
    # a dict containing a single role raised NameError at the sum below.
    pessoasAliado: t.List[schemas.Pessoa] = []
    pessoasColab: t.List[schemas.Pessoa] = []
    for key in qtde:
        if key == "aliado":
            pessoasAliado = db.query(models.Pessoa)\
                .filter(models.Pessoa.aliado == True)\
                .order_by(func.random())\
                .limit(qtde[key])\
                .all()
        elif key == "colaborador":
            pessoasColab = db.query(models.Pessoa)\
                .filter(models.Pessoa.colaborador == True)\
                .order_by(func.random())\
                .limit(qtde[key])\
                .all()
        else:
            raise HTTPException(status_code=404,
                                detail="papel não encontrado")
    pessoas = pessoasAliado + pessoasColab
    if not pessoas:
        raise HTTPException(status_code=404,
                            detail="pessoas não encontradas")
    return pessoas
def try_new_dishes():
    """Recommend one untried dish with rating >= 3, optionally from a
    specific category.

    JSON body: 'previous_dishes' (non-empty list of dish ids) and
    'new_category' (category id; 0 means any category).
    Returns {'success': True, 'dish to try': <formatted dish>}.
    Aborts 400 on a malformed body, 422 on query/formatting failures.
    """
    dishes_to_try = []
    body = request.get_json()
    if body is None:
        abort(400)
    previous_dishes = body.get('previous_dishes')
    # BUG FIX: guard None before len(); a body missing the key used to
    # raise TypeError (HTTP 500) instead of a clean 400.
    if previous_dishes is None or len(previous_dishes) == 0:
        abort(400)
    new_category = body.get('new_category')
    # BUG FIX: new_category is a category id (int), so the original
    # len(new_category) raised TypeError; reject only missing/empty.
    if new_category in (None, ''):
        abort(400)
    try:
        if new_category == 0:
            # No category specified: any unseen dish rated >= 3.
            dishes = Dish.query.filter(
                Dish.id.notin_(previous_dishes),
                Dish.rating >= 3
            ).order_by(func.random()).limit(1)
        else:
            # Category specified: same constraints within that category.
            dishes = Dish.query.filter(
                Dish.category_id == new_category,
                Dish.id.notin_(previous_dishes),
                Dish.rating >= 3
            ).order_by(func.random()).limit(1)
        # At most one row thanks to limit(1).
        for d in dishes:
            dishes_to_try.append(get_formatted_dish(d.id))
        return jsonify({"success": True, "dish to try": dishes_to_try[0]})
    except Exception:
        # Also covers the IndexError when no dish qualifies.
        abort(422)
def quickgame(cat_id, count, options):
    """Build a quick-game payload: `count` random tweets from category
    `cat_id`, each paired with `options` candidate authors (the real
    author plus `options - 1` decoys from the same category), shuffled.
    """
    count = int(count)
    options = int(options)
    cat = Category.query.get(cat_id)
    # Random sample of tweets from this category.
    tweets = cat.tweets_query.order_by(func.random()).limit(count).all()
    user_ids = [tweet.user_id for tweet in tweets]
    real_users = [TwitterUser.query.get(uid) for uid in user_ids]
    fake_users = []
    for tweet in tweets:
        # Decoy users drawn from the category, excluding the real author.
        fake_user = cat.users_query.filter(
            TwitterUser.id != tweet.user_id).order_by(
            func.random()).limit(options - 1).all()
        fake_users.append(fake_user)
    game = zip(tweets, real_users, fake_users)
    # random.sample of the full option pool shuffles the real answer in
    # among the decoys so its position gives nothing away.
    game_list = [{
        'tweet': tweet.text,
        'options': random.sample([
            *[{
                'handle': fake.username,
                'name': fake.name,
                'photo': fake.profile_image_url,
                'real': False
            } for fake in fakes],
            {
                'handle': real.username,
                'photo': real.profile_image_url,
                'name': real.name,
                'real': True
            }
        ], options)
    } for (tweet, real, fakes) in game]
    return jsonify(game_list)
def search_events(request, sse, search_type, search_query):
    """Search up to 10 random events whose location or category matches
    `search_query` (case-insensitive LIKE).

    NOTE(review): the `.encode().lower()` and `cgi.escape` calls are
    Python-2 era; under Python 3 they yield bytes, which would break the
    `in types` membership test and the '%' string concatenation —
    confirm the target interpreter before modernizing.
    """
    try:
        search_type = str(search_type).encode().lower()
        types = set(['location', 'category'])
        if search_type not in types:
            return jsonify(error=True,
                           message='search type is unknown/invalid: ' + search_type)
        # Normalise custom '_space_' placeholders and URL-encoded spaces.
        search_query = str(search_query).encode().lower().replace(
            '_space_', ' ').replace('%20', ' ')
        # Escaped, wildcard-wrapped LIKE pattern.
        query = '%' + str(cgi.escape(search_query)).encode().lower() + '%'
        if search_type == 'location':
            events = db_session.query(Events) \
                .filter(
                    func.lower(Events.location).like(
                        query
                    )
                ) \
                .order_by(func.random()) \
                .limit(10).all()
        if search_type == 'category':
            events = db_session.query(Events) \
                .filter(
                    func.lower(Events.categories).like(
                        query
                    )
                ) \
                .order_by(func.random()) \
                .limit(10).all()
        return jsonify(message='events',
                       events=[e.serialize_small for e in events])
    except Exception as err:
        print(err)
        return jsonify(error=True, errorMessage=str(err),
                       message='error processing...')
def output_json():
    """Serve one random image record as JSON, filtered by optional
    gender / age-range query parameters, stamping its last_served time."""
    gender = request.args.get('gender', '')
    minimum_age = request.args.get('minimum_age', 0)
    maximum_age = request.args.get('maximum_age', 99)

    # Always constrain by age; add the gender filter only when supplied.
    criteria = [ImageRecord.age >= minimum_age,
                ImageRecord.age <= maximum_age]
    if gender != '':
        criteria.insert(0, ImageRecord.gender == gender)
    db_output = ImageRecord.query.filter(*criteria).order_by(
        func.random()).first_or_404()

    dict_output = {
        'gender': db_output.gender,
        'age': db_output.age,
        'filename': db_output.filename,
        'date_added': db_output.date_added,
        'source': db_output.source,
        'image_url': db_output.image_url(),
        'last_served': db_output.last_served
    }
    # Record that this image was just handed out.
    db_output.last_served = datetime.utcnow()
    db.session.commit()
    return jsonify(dict_output)
def get_league_opponent(my_elo, league_id, request_user):
    """Find a random league opponent whose Elo lies in a widening window
    around my_elo.

    Starts at +/-100 and widens by 100 per retry, giving up after the
    ninth retry. Returns None when no opponent is ever found.
    """
    def _attempt(window):
        # One lookup within the given Elo window; excludes the requester
        # and users without a hero party.
        return LeagueUserPropertyModel.query. \
            filter(LeagueUserPropertyModel.league_id == league_id). \
            filter(LeagueUserPropertyModel.user_id != request_user.id). \
            filter(LeagueUserPropertyModel.hero_party != None). \
            filter(LeagueUserPropertyModel.elo <= my_elo + window). \
            filter(LeagueUserPropertyModel.elo >= my_elo - window). \
            order_by(func.random()). \
            first()

    # FIX: the original duplicated the whole query in both the initial
    # attempt and the retry loop; consolidated into one helper.
    opponent = _attempt(100)
    count = 2
    while opponent is None:
        opponent = _attempt(100 * count)
        count += 1
        if count == 10:
            break
    return opponent
def get_next_question():
    """Return one random question not yet asked, optionally restricted
    to a category (category 0 means all).

    Responds with the formatted question plus quiz state, or
    {'success': False} when no questions remain. Aborts 400 on errors.
    """
    try:
        body = request.get_json()
        previous_questions = body.get('previous_questions')
        quiz_category = body.get('quiz_category')
        if quiz_category == 0:
            # When ALL is selected, start next question from a random category
            next_questions = Question.query.filter(
                Question.id.notin_(previous_questions)).order_by(
                func.random()).limit(1)
        else:
            next_questions = Question.query.filter(
                Question.category == quiz_category,
                Question.id.notin_(previous_questions)).order_by(
                func.random()).limit(1)
        formatted_next_questions = [
            question.format() for question in next_questions
        ]
        if len(formatted_next_questions) > 0:
            return jsonify({
                "success": True,
                # the first element from the question list
                "question": formatted_next_questions[0],
                "previousQuestions": previous_questions,
                "guess": '',
                "showAnswer": False
            })
        else:
            return jsonify({"success": False})
    except Exception:
        # BUG FIX: a bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        abort(400)
def get_vocab():
    """Return a random meaning and its words as a JSON payload."""
    single_word_value = request.args.get('single_word_value')

    # One random meaning row.
    rand_m = Meaning.query.order_by(func.random()).first()

    # Collect every word linked to that meaning, comma-separated.
    db_words = Word.query.filter_by(link_id=rand_m.id).all()
    pieces = [str(w.w_text) for w in db_words]

    data = {}
    data["words"] = ", ".join(pieces)
    data["meaning"] = rand_m.m_text

    if single_word_value == "true":
        # Also include one random word from the same meaning.
        chosen = Word.query.filter_by(link_id=rand_m.id).order_by(
            func.random()).first()
        data["single_word"] = chosen.w_text

    return jsonify(data)
def update():
    """Refill the two module-level example lists with fresh random rows."""
    example.clear()
    example2.clear()
    for _ in range(2):
        # One random row of each entry variant per iteration.
        picked = session.query(Entry).order_by(func.random()).first()
        example.append(picked)
        picked2: EntryVar2 = session.query(EntryVar2).order_by(
            func.random()).first()
        example2.append(picked2)
def get_random_question(cls, previous_questions, category=None):
    """Pick one random question whose id is not in previous_questions,
    optionally limited to a single category.

    Returns None when no unseen question remains.
    """
    unseen = cls.query.filter(cls.id.notin_(previous_questions))
    if category is not None:
        unseen = unseen.filter(cls.category == category)
    return unseen.order_by(func.random()).first()
def random_prompt(prompt_model, field=None):
    """Return the prompt text of one random row from prompt_model.

    When `field` is truthy, only rows whose `alive` equals it are
    considered. Returns '' when no model is supplied, and ' ' when no
    row matches.
    """
    if not prompt_model:
        return ''
    if field:
        row = prompt_model.query.order_by(
            func.random()).filter_by(alive=field).first()
    else:
        row = prompt_model.query.order_by(func.random()).first()
    return row.prompt if row else ' '
def play():
    """Render the play page with two distinct random stimuli."""
    def _draw():
        # One uniformly random stimulus row.
        return Stimulus.query.order_by(func.random()).first()

    first_stimulus = _draw()
    second_stimulus = _draw()
    # Re-draw until they differ so a stimulus is never compared to
    # itself. NOTE: with a single stimulus in the db this never ends.
    while second_stimulus == first_stimulus:
        second_stimulus = _draw()
    return render_template('play.html',
                           first_stimulus=first_stimulus,
                           second_stimulus=second_stimulus)
def get_random_user():
    """Select a random not-yet-selected user other than current_user,
    record the pairing, and return the selected user.

    Returns None when no eligible user exists.
    """
    def _pick():
        return User.query.filter_by(isSelected=False).order_by(
            func.random()).first()

    random_user = _pick()
    # Keep drawing while we keep landing on ourselves.
    while random_user is not None and random_user == current_user:
        random_user = _pick()
    # BUG FIX: the original's post-loop equality check was unreachable
    # dead code, and a None result fell through to an AttributeError on
    # `random_user.isSelected`.
    if random_user is None:
        return None
    current_user.isJoined = True
    random_user.isSelected = True
    db.session.commit()
    return random_user
def get_random_tags(count=5):
    """Return up to `count` random tags that have at least one yarn."""
    tag_query = (session.query(Tag)
                 .filter(Tag.yarns.any())
                 .order_by(func.random())
                 .limit(count))
    return tag_query.all()
def questions():
    '''POST endpoint to get questions to play the quiz.
    - Request Arguments: category, previous questions
    - Returns: A random question
    '''
    try:
        req_body = json.loads(request.data)
        previous_questions = req_body.get('previous_questions')
        quiz_category = req_body.get('quiz_category')['id']
    except Exception:
        abort(400)

    # Always exclude already-played questions; restrict by category
    # unless 0 ("all") was requested.
    conditions = [not_(Question.id.in_(previous_questions))]
    if quiz_category != 0:
        conditions.insert(0, Question.category == quiz_category)

    try:
        question = Question.query.filter(*conditions).order_by(
            func.random()).first()
        data = {
            'question': question.format,
        }
    except AttributeError:
        # No question left: `question` was None.
        data = {}
    except Exception:
        abort(500)
    finally:
        db.session.close()
    return generate_response(data=data)
def get_words(term, quantity=1):
    """Query words joined with their most popular description text.

    When `term` is None, returns `quantity` rows starting at a random
    offset; otherwise returns every word matching `term` (ILIKE).
    Returns a SQLAlchemy query, not a materialized list.
    """
    # if we haven`t received a term to search, randomize the words
    PopularDescription = Text
    # Correlated scalar subquery: the id of one Text row belonging to
    # the outer Word (treated as its "popular description").
    popular_description_id = db.session.query(PopularDescription.id)\
        .filter(PopularDescription.word_id == Word.id)\
        .limit(1)\
        .correlate(Word)\
        .as_scalar()
    get_words = db.session.query(Word, Text, Language, Lexicographer)\
        .join(Text, Text.id == popular_description_id)\
        .join(Language, Language.id == Text.language_id)\
        .join(Lexicographer, Lexicographer.id == Text.created_by_id)\
        .with_entities(Word.id, Word.word, Text.timestamp, Text.text, Text.id.label("text_id"), \
                       Language.name, Language.code, Word.created_by_id, Lexicographer.username, \
                       Text.num_ratings, Language.color)
    if term is None:
        # Random starting offset in [0, count(words)) computed in SQL.
        return get_words.offset(
            func.floor(func.random() * db.session.query(func.count(Word.id)))).limit(quantity)
    # otherwise use our term to search
    else:
        return get_words.filter(Word.word.ilike("%" + term + "%"))
def randomGetIp(targeturl):
    """Return a validated "ip:port" proxy from the pool.

    Spawns the background harvester thread when the pool is empty and
    deletes pool entries that fail validation, looping until a working
    proxy is found.
    """
    global session1, thread
    ip_port = ""
    validated = False
    while not validated:
        count = session1.query(TIpPool).count()
        if count == 0:
            # Pool exhausted: start the harvester thread exactly once.
            # FIX: `thread == None` -> `thread is None` (identity idiom).
            if thread is None:
                thread = threading.Thread(target=getip,
                                          args=(targeturl, 'ip.txt'))
                thread.start()
            # Poll until the harvester has inserted at least one row.
            while count == 0:
                session1.execute('reset query cache')
                count = session1.query(TIpPool).count()
                time.sleep(1)
                print("数量:" + str(count))
        ipPool = session1.query(TIpPool).order_by(
            func.random()).limit(1).first()
        ip_port = ipPool.ip + ":" + ipPool.port
        validated = checkip(targeturl, ip_port)
        # print("ip_port:%s, validation result:%s" % (ip_port, validated))
        if not validated:
            # Evict proxies that fail validation.
            session1.delete(ipPool)
            session1.commit()
    return ip_port
def get_questions(selected_categories):
    """Return 10 random (Post, Category) pairs, restricted to the given
    category names unless the selection is empty ('')."""
    # gets all the posts related to selected categories by user
    base = db.session.query(Post, Category).join(Category)
    if selected_categories[0] == '':
        query = base.order_by(func.random()).limit(10).all()
    else:
        # BUG FIX: added .all() so both branches return a list; the
        # filtered branch used to return a lazy Query object, giving the
        # caller an inconsistent type.
        query = base.filter(
            Category.name.in_(selected_categories)).order_by(
            func.random()).limit(10).all()
    return query
def main(args: Args):
    """Randomly flag `args.number` inactive molecules as test-set members."""
    mols = session.query(Molecule).filter_by(active=False).order_by(
        func.random()).limit(args.number)
    total = mols.count()
    for mol in tqdm(mols, total=total):
        # Mark the sampled molecule for the test set.
        mol.testset = True
    session.commit()
def get_random_question():
    """Return one random unseen quiz question, or a success-only payload
    when the quiz is over (>= 5 questions asked or none remain).

    Aborts 400 when quiz_category or previous_questions is missing.
    """
    data = request.get_json()
    print(data)
    quiz_category = data.get('quiz_category')
    prevQuestions = data.get('previous_questions')
    # Both fields are mandatory.
    if (quiz_category is None) or (prevQuestions is None):
        return abort(400)
    # A game lasts at most five questions.
    if len(prevQuestions) >= 5:
        return jsonify({
            "success": True
        })
    # PERF FIX: the original materialized every matching row with .all()
    # only to take element [0]; .first() on the already-randomized query
    # fetches a single row with the same distribution.
    base = Question.query.filter(Question.id.notin_(prevQuestions))
    if quiz_category != 0:
        base = base.filter(Question.category == quiz_category)
    question = base.order_by(func.random()).first()
    if question is not None:
        return jsonify({
            'success': True,
            'question': question.format()
        })
    return jsonify({
        "success": True
    })
def quiz():
    """Serve the next random question not in previous_questions; a
    'click' category type means any category."""
    payload = request.get_json()
    previous = payload['previous_questions']
    category = payload['quiz_category']
    unseen = Question.query.filter(Question.id.notin_(previous))
    if category['type'] != 'click':
        # The front-end id appears to be zero-based while DB categories
        # start at 1 (hence the +1) — preserved as-is.
        unseen = unseen.filter(Question.category == int(category['id']) + 1)
    next_question = unseen.order_by(func.random()).first()
    if not next_question:
        return jsonify({
            'success': False,
        })
    return jsonify({'success': True, 'question': next_question.format()})
def tournament_json():
    """Record a like on POST, then return 10 random resolution texts as
    a JSON array."""
    if request.method == 'POST':
        # Register the like for the submitted resolution id.
        like(request.form.getlist('R')[0])
    sampled = db.session.query(Resolution).order_by(func.random()).limit(10)
    return json.dumps([row.resolved for row in sampled])
def get_next_card(cls, study_session_id, deck_id):
    """Return the next active, not-yet-studied card for the current
    user's study session.

    Ordering follows the user's StudyPlan when present (newest/oldest),
    otherwise a random card is drawn. Aborts 404 when the session does
    not exist, belongs to someone else, or is not in the 'Studying'
    state. Returns None when no card remains.
    """
    session = StudySession.query.filter_by(
        id=study_session_id,
        user_id=g.user.id,
        deck_id=deck_id,
        state="Studying"
    ).first()
    if session is None:
        abort(404)
    # Subquery of card ids already shown during this session.
    study_logs = db.session.query(StudySessionLog.card_id).filter_by(
        study_session_id=session.id
    )
    study_plan = db.session.query(StudyPlan).filter_by(user_id=g.user.id).first()
    # Default ordering is random; a StudyPlan may override it.
    ordering = func.random()
    if study_plan:
        if study_plan.order.value == "latest":
            ordering = Card.date_created.desc()
        elif study_plan.order.value == "oldest":
            ordering = Card.date_created.asc()
    card = (
        db.session.query(Card)
        .filter(
            Card.state == "Active",
            Card.user_id == g.user.id,
            Card.deck_id == session.deck_id,
            ~(Card.id.in_(study_logs)),
        )
        .order_by(ordering)
        .first()
    )
    return card
def get_username_to_unfollow_random(self):
    """
    Gets random username that is older than follow_time and has zero
    unfollow_count
    """
    # Anyone followed before this moment has been followed long enough.
    cut_off_time = datetime.now() - timedelta(seconds=self.bot.follow_time)
    candidates = self._session.query(Follower) \
        .filter(Follower.unfollow_count == 0) \
        .filter(Follower.last_followed < cut_off_time)
    return candidates.order_by(func.random()).first()
def get_random_related_movies(user=None):
    """Pick a pair of movies (m1, m2): m1 drawn from the user's history
    or general relevance, m2 a close/recommended companion.

    Returns (m1, m2), or None when the TMDB recommendations call fails.
    NOTE(review): the user=None default would raise AttributeError on
    `user.is_authenticated` — confirm callers always pass a user object.
    """
    not_seen = []
    if user.is_authenticated:
        not_seen = get_seen_movies(user.id, -1)
        seen = get_seen_movies(user.id, 1)
        top_movs = uf.get_top_movies(user.id)
        prospects = seen + top_movs
        # Random index over an inflated range (1.4x + 15) biases toward
        # a fresh relevant pick when the prospect list is short.
        rand_index = random.randint(0, math.trunc((1.4 * len(prospects)) + 15))
        if rand_index < len(prospects):
            movie_id = prospects[rand_index]
        else:
            movie_id = get_relevant_movie(not_seen)
    else:
        movie_id = get_relevant_movie()
    m1 = Movie.query.filter(Movie.id == movie_id).first()
    # 2 times in 5, pair with a "close" movie instead of TMDB recs.
    if random.randrange(0, 5) < 2:
        m2 = get_close_movie(m1.id, not_seen)
        return m1, m2
    respons = requests.get('https://api.themoviedb.org/3/movie/' +
                           str(m1.id) + '/recommendations?api_key=' +
                           tmdb_key + '&language=en-US&page=' +
                           random.choices("12", cum_weights=(0.65, 1.00))[0])
    if respons.status_code != 200:
        return None
    ids = [r['id'] for r in json.loads(respons.text)['results']]
    m2 = Movie.query.filter(Movie.id.in_(ids),
                            Movie.id.notin_(not_seen)).order_by(
        func.random()).first()
    # BUG FIX: `m2 == None` replaced with the identity test `is None`
    # (== can be overridden by SQLAlchemy models).
    if m2 is None:
        m2 = get_close_movie(m1.id, not_seen)
    return m1, m2
def parse_order_by(queryset, *field_names):
    """
    TODO:add support for related fields
    Parse the order_by clause and return the modified query. This does
    not consider related tables at this time.
    """
    for field in field_names:
        # "?" requests random ordering (Django-style).
        if field == "?":
            queryset.query = queryset.query.order_by(func.random())
            continue
        # Integer fields order by column position; negative means desc.
        if isinstance(field, int):
            if field < 0:
                order = desc
                field = -field
            else:
                order = asc
            queryset.query = queryset.query.order_by(order(field))
            continue
        # evaluate the descending condition
        # NOTE(review): this tests '-' *anywhere* in the name but only
        # strips the first character; a field like "a-b" would be mangled.
        if "-" in field:
            order = desc
            field = field[1:]
        else:
            order = asc
        # old school django style for related fields
        if "." in field:
            # TODO: this is not accurate
            # BUG(review): `condition` is never assigned on this path, so
            # a dotted field raises NameError (or silently reuses a stale
            # value from a previous iteration) — needs a real fix.
            queryset.query = queryset.query.order_by(order(condition))
        else:
            # normal order by
            queryset, parts = parse_joins(queryset, field)
            condition = reduce(lambda x, y: getattr(x, y), parts)
            queryset.query = queryset.query.order_by(order(condition))
    return queryset
def get_quiz_questions():
    """Return one random question the player has not seen; category id 0
    means any category. `question` is null when none remain."""
    body = request.get_json()
    previous_questions = body.get('previous_questions', None)
    quiz_category = body.get('quiz_category', None)

    # PERF/BUG FIX: the original iterated a fully materialized randomized
    # query in Python and kept the *last* unseen row it saw. Excluding
    # previous ids in SQL and taking first() yields an equally random
    # unseen question with a single bounded query, and also removes the
    # special-casing of the first question.
    query = Question.query.filter(Question.id.notin_(previous_questions))
    if quiz_category.get('id') != 0:
        query = query.filter(Question.category == quiz_category.get('id'))
    current_question = query.order_by(func.random()).first()

    return jsonify({
        "question":
        None if current_question is None else current_question.format(),
        "success": True
    })
def get_related_articles(self, count=5):
    """Return up to `count` random other publications drawn from all
    divisions that belong to the same portal as this publication."""
    from sqlalchemy.sql import func
    return g.db().query(Publication).filter(
        and_(Publication.id != self.id,
             # Any division id under this publication's portal.
             Publication.portal_division_id.in_(
                 db(PortalDivision.id).filter(PortalDivision.portal_id == self.division.portal_id))
             )).order_by(func.random()).limit(count).all()
def get_candidate_to_follow(self) -> Follower:
    """Pick one random private account that was never followed, never
    followed back, and never filtered out."""
    candidates = (self._session.query(Follower)
                  .filter(Follower.last_followed.is_(None))
                  .filter(Follower.followed_back.is_(None))
                  .filter(Follower.filtered.is_(None))
                  .filter(Follower.is_private.is_(True)))
    return candidates.order_by(func.random()).first()
def getRandomWriter():
    """Return up to 10 random writers as JSON (name and slug only)."""
    rows = Writers.query.\
        with_entities(Writers.id, Writers.name, Writers.slug).\
        order_by(func.random()).limit(10).all()
    payload = [{'name': row.name, 'slug': row.slug} for row in rows]
    return jsonify(writer=payload), 200
def get_url(gender="", minimum_age=0, maximum_age=0, thumb=False):
    """Return the image (or thumbnail) URL of one random record matching
    the gender/age filters, updating its last_served timestamp.

    NOTE(review): the default maximum_age=0 only matches age-0 records
    when called without arguments — confirm callers always pass ages.
    """
    criteria = [ImageRecord.age >= minimum_age,
                ImageRecord.age <= maximum_age]
    if gender != '':
        criteria.insert(0, ImageRecord.gender == gender)
    db_output = ImageRecord.query.filter(*criteria).order_by(
        func.random()).first_or_404()
    # Record that this image was just handed out.
    db_output.last_served = datetime.utcnow()
    db.session.commit()
    if thumb == False:
        return db_output.image_url()
    if thumb == True:
        return db_output.thumb_url()
def random_row():
    """Return a random message row serialized as a plain dict."""
    picked = Message.query.order_by(func.random()).first()
    return {
        "id": picked.id,
        "message": picked.message,
        "likes": picked.likes,
        "reports": picked.reports,
    }
def make_comment(post_type):
    """Generate fake comments for a post of `post_type` using Markov
    models trained on real live/dead comment text.

    Returns a list of dicts: {"text", "by", "dead"}.
    """
    Session = sessionmaker(bind=engine)
    db = Session()
    # Train a separate model on dead comments so flagged output reads
    # like real killed comments.
    dead_comments = db.query(Comment.text) \
        .filter(Comment.dead == True, Comment.text != None) \
        .order_by(func.random()).limit(30000)
    dead_comment_sim = train_from_query(dead_comments, CommentSim)
    comment_query = db.query(Comment.text).filter(Comment.dead == False)
    if post_type != "normal":
        # Restrict training data to comments appearing in threads of
        # this post type (via the stories' all_kids arrays).
        comment_query = comment_query.filter(Comment.id.in_(
            db.query(func.unnest(Story.all_kids)).filter_by(**queries[post_type]["query"])
        ))
    comment_query = comment_query.order_by(func.random()).limit(60000)
    random_user_query = db.query(Comment.by).order_by(func.random())
    comment_sim = train_from_query(comment_query, CommentSim)
    # Random author pool: 0-50 random usernames.
    user_names = (by[0] for by in random_user_query.limit(random.randint(0, 50)))
    comments = []
    for user_name in user_names:
        # randint(2, 100) < 5 marks roughly 3% of comments as dead.
        is_dead = random.randint(2, 100) < 5
        sim = comment_sim if not is_dead else dead_comment_sim
        comment_length, comment = random.randint(0, 200), ""
        # Stitch generated sentences until near the target length.
        # NOTE(review): make_short_sentence may return None when it fails
        # to generate, which would raise TypeError on the `+` — confirm.
        while len(comment) < comment_length:
            if (comment_length - len(comment)) < 25:
                break
            elif (comment_length - len(comment)) < 50:
                comment += sim.make_short_sentence(50, tries=10000, max_overlap_total=10, max_overlap_ratio=0.5) + "\n"
            else:
                comment += sim.make_short_sentence(100, tries=10000, max_overlap_total=10, max_overlap_ratio=0.5) + "\n"
        comment = comment.replace(".", ". ")
        comment_data = {"text": comment, "by": user_name, "dead": is_dead}
        comments.append(comment_data)
    return comments
def get_sticker_random(self, sticker_name: str):
    """Fetch (img_url, local_save, is_gif) for one random sticker with
    the given name, or None when no sticker matches."""
    with self._session_scope() as session:
        sticker_query = session.query(
            Sticker.img_url, Sticker.local_save, Sticker.is_gif)
        sticker_query = sticker_query.filter(
            Sticker.name == sticker_name).order_by(func.random())
        query_data = sticker_query.first()
    return query_data
def tournament_json():
    """Handle a tournament vote (POST) and return 10 random resolutions
    as a JSON array."""
    if request.method == 'POST':
        # Register the like for the submitted resolution id.
        like(request.form.getlist('R')[0])
    rows = db.session.query(Resolution).order_by(func.random()).limit(10)
    resolutions = [entry.resolved for entry in rows]
    return json.dumps(resolutions)
def random_card():
    """Redirect to a random card, retrying a bounded number of times to
    avoid cards already seen in this browser session."""
    row = None
    lu_try = 0
    CARDS_TO_TRY = 10  # number of random cards to try before giving up
    while not row and lu_try < CARDS_TO_TRY:
        lu_try += 1
        row = Card.query.order_by(func.random()).first()
        # No session history (or out of tries): accept whatever we drew.
        if 'cards' not in session or lu_try >= CARDS_TO_TRY:
            break
        # FIX: the bound was a hard-coded 50, disagreeing with the
        # CARDS_TO_TRY constant above (both are always true at this
        # point, so behavior is unchanged — but the constant now rules).
        if lu_try < CARDS_TO_TRY and row.id_string in session['cards']:
            row = None
    # NOTE(review): with an empty Card table `row` stays None and the
    # attribute access below raises AttributeError — confirm intended.
    return redirect(url_for('card', card_id=row.id_string))
def random_link():
    """Redirect to the first URL found while scanning batches of random
    messages."""
    url = None
    while url is None:
        # Draw 10 random messages and scan each for a URL.
        messages = session.query(Message).order_by(func.random()).limit(10).all()
        for message in messages:
            matches = URL_RE.findall(message.message)
            if matches:
                candidate = matches[0]
                # Slack-style links may carry a label after '|'.
                url = candidate.split('|', 1)[0] if '|' in candidate else candidate
                break
    return redirect(url)
def test_property_type(self):
    """Each node subclass must report its own class name as `type`."""
    self.populate(self.session)
    cases = [
        (Menu, 'Menu'),
        (Page, 'Page'),
        (Section, 'Section'),
        (ExternalLink, 'ExternalLink'),
        (InternalLink, 'InternalLink'),
    ]
    for model, expected in cases:
        # Any random row will do: `type` is uniform per class.
        instance = self.session.query(model).\
            order_by(func.random()).first()
        self.assertEqual(instance.type, expected)
def test_property_type(self):
    """Each info subclass must report its own class name as `type`."""
    self.populate(self.session)
    cases = [
        (MenuInfo, 'MenuInfo'),
        (PageInfo, 'PageInfo'),
        (SectionInfo, 'SectionInfo'),
        (ExternalLinkInfo, 'ExternalLinkInfo'),
        (InternalLinkInfo, 'InternalLinkInfo'),
    ]
    for model, expected in cases:
        # Any random row will do: `type` is uniform per class.
        instance = self.session.query(model).\
            order_by(func.random()).first()
        self.assertEqual(instance.type, expected)
def get_member(gender):
    """ Return the details of a randomly selected member of parliament. """
    normalized = gender.lower()
    if normalized not in ["male", "female"]:
        abort(400)
    # Map the textual gender onto the single-letter database key.
    gender_key = "F" if normalized == "female" else "M"
    mp = MemberOfParliament.query.filter_by(gender=gender_key).order_by(func.random()).first()
    return send_api_response(mp.as_dict())
def active_exchanges(name=None, random=False):
    """Return the cached registry of active exchanges (those whose
    from-currency is also active), optionally filtered by name.

    NOTE(review): the module-level cache is populated once per process;
    subsequent calls ignore different name/random arguments. Also, the
    random branch stores a single Exchange (first()) while the other
    branches store lists — confirm callers expect that mixed type.
    """
    global _registry
    active_exchanges_query = Exchange.query.filter_by(
        active=True).join(Currency, Exchange.currency_from).filter_by(
        active=True)
    if _registry is None:
        if not name:
            _registry = active_exchanges_query.all()
        elif random:
            # One random active exchange with the given name.
            _registry = active_exchanges_query.filter_by(
                name=name).order_by(func.random()).first()
        else:
            _registry = active_exchanges_query.filter_by(name=name).all()
    return _registry
def get_questions(group_id):
    """Build quiz clues from up to 10 random entries of a group.

    For each entry, up to 10 random note lines (from the first '---'
    section) are sampled; the first line that is long enough (>= 16
    chars) or bolded ('**') becomes that entry's clue. Returns a JSON
    string of clue dicts.
    """
    contents = []
    entries = Entry.query.filter_by(group_id=group_id).order_by(
        func.random()).limit(10).all()
    for e in entries:
        lines = e.notes.split('---')[0].split('\n')
        # FIX: `xrange` is Python-2-only; `range` behaves identically
        # here on both interpreters.
        for _ in range(0, 10):
            # strip leading spaces & bullets
            line = random.choice(lines).lstrip(' +')
            if (len(line) >= 16) or ('**' in line):
                contents.append({
                    'id': e.id,
                    'title': e.title,
                    'creator': e.creator,
                    'clue': line
                })
                break
    return json.dumps(contents)
def create_run_batches(self, args):
    '''Creates job descriptions for parallel computing.

    Parameters
    ----------
    args: tmlib.workflow.jterator.args.BatchArguments
        step-specific arguments

    Returns
    -------
    generator
        job descriptions
    '''
    channel_names = [
        ch.name for ch in self.project.pipe.description.input.channels
    ]
    # Plotting produces per-site figures, so only one site per job.
    if args.plot and args.batch_size != 1:
        raise JobDescriptionError(
            'Batch size must be 1 when plotting is active.'
        )
    with tm.utils.ExperimentSession(self.experiment_id) as session:
        # Distribute sites randomly. Thereby we achieve a certain level
        # of load balancing in case wells have different number of
        # cells, for example.
        sites = session.query(tm.Site.id).order_by(func.random()).all()
        site_ids = [s.id for s in sites]
        batches = self._create_batches(site_ids, args.batch_size)
        for j, batch in enumerate(batches):
            # Image-file locations for the requested channels in this
            # batch of sites.
            # NOTE(review): image_file_locations is computed but never
            # used in the yielded description — confirm intent.
            image_file_locations = session.query(
                tm.ChannelImageFile._location
            ).\
                join(tm.Channel).\
                filter(tm.Channel.name.in_(channel_names)).\
                filter(tm.ChannelImageFile.site_id.in_(batch)).\
                all()
            yield {
                'id': j + 1,  # job IDs are one-based!
                'site_ids': batch,
                'plot': args.plot
            }
def index(self):
    """Render the guess-avatar page: 8 random users with avatars, one
    chosen as the current puzzle, plus the visitor's recent guesses."""
    c.users = Session.query(User). \
        filter(User.avatar!=None). \
        options(orm.joinedload(User.avatar)). \
        order_by(func.random()). \
        limit(8). \
        all()
    # The avatar the visitor has to guess this round.
    c.one = choice(c.users)
    if 'user' in session:
        # Show the logged-in user's 25 most recent guesses.
        c.avatar_guesses = Session.query(AvatarGuess). \
            options(orm.joinedload(AvatarGuess.guessed_avatar_)). \
            filter(AvatarGuess.user==session['user']). \
            order_by(AvatarGuess.created_at.desc()). \
            limit(25)
    if 'ajax' in request.params:
        # Partial render for asynchronous refreshes.
        return render('/guess-avatar/choice.mako')
    else:
        c.title = 'ugani avatar'
        return render('/guess-avatar/index.mako')
def test_standalone(self): table1 = self.tables.people # no special alias handling even though clause is not in the # context of a FROM clause self.assert_compile( tablesample(table1, 1, name='alias'), 'people AS alias TABLESAMPLE system(:system_1)' ) self.assert_compile( table1.tablesample(1, name='alias'), 'people AS alias TABLESAMPLE system(:system_1)' ) self.assert_compile( tablesample(table1, func.bernoulli(1), name='alias', seed=func.random()), 'people AS alias TABLESAMPLE bernoulli(:bernoulli_1) ' 'REPEATABLE (random())' )
def get(self, id):
    """Fetch a comic by numeric id or by keyword ('newest', 'oldest',
    'random'); returns an HTTP-style (status, payload) tuple.

    NOTE(review): `basestring` is Python-2-only — under Python 3 that
    branch raises NameError; confirm the target interpreter.
    """
    comic = None
    if isinstance(id, int):
        comic = ComicModel.query.get(id)
    elif isinstance(id, basestring):
        if id == 'newest':
            comic = ComicModel.query.order_by(
                ComicModel.id.desc()).limit(1).first()
        elif id == 'oldest':
            comic = ComicModel.query.order_by(
                ComicModel.id).limit(1).first()
        elif id == 'random':
            # XXX - This call may change depending on the database
            # being used.
            comic = ComicModel.query.order_by(
                func.random()).limit(1).first()
    if comic is None:
        return 404, {}
    buf = ComicSchema().dump(comic).data
    buf['path'] = 'comics/%d.png' % comic.id
    return 200, buf
def get_random_movie():
    """Return one random movie title decoded from UTF-8 bytes."""
    title_row = db.session.query(Movie.title).order_by(
        func.random()).limit(1).one()
    return title_row[0].decode('utf-8')
def random_movie():
    """Return a random movie title, or '' when the lookup fails (e.g.
    empty table)."""
    try:
        title_row = db.session.query(Movie.title).order_by(
            func.random()).limit(1).one()
        return title_row[0]
    except Exception:
        # Deliberate best-effort: any failure degrades to an empty title.
        return ""
def get_random_excuse(cls, db):
    """Return one random published excuse, or None when none exist."""
    published_rows = db.query(cls).filter(cls.published == True)
    return published_rows.order_by(func.random()).first()
def retrieve_random_test(difficulty):
    """Return one random Test row of the requested difficulty."""
    matching = session.query(models.Test).filter(
        models.Test.difficulty == difficulty)
    return matching.order_by(func.random()).first()
def generate_word_sound(word):
    """Return the audio samples for `word`, sliced from one randomly
    chosen recording that contains it.

    NOTE(review): the end index `int(rate * audio_obj.end + 1)` adds 1
    inside the int() of the product — confirm whether
    `int(rate * audio_obj.end) + 1` was intended.
    """
    # Pick one of possibly several recordings of this word.
    audio_obj = Session.query(Audio).filter_by(word=word).order_by(func.random()).first()
    rate, data = read(audio_obj.audio_file)
    # Convert start/end times (seconds) to sample indices and slice.
    sound = data[int(rate * audio_obj.start):int(rate * audio_obj.end + 1)]
    return sound
def get_featured_works(cls, count=20):
    """Return a query yielding `count` random featured works."""
    randomized = models.BlakeFeaturedWork.query.order_by(func.random())
    return randomized.limit(count)
def main():
    """Generate a corpus of fake posts (titles + threaded comments) from
    Markov models trained on real stories, and dump it to
    data/posts.json.
    """
    Session = sessionmaker(bind=engine)
    sesh = Session()
    print("Setting all_comments")
    # Install a recursive SQL helper that collects every descendant
    # comment id of a post into an integer array.
    result = sesh.execute(
        """
        CREATE OR REPLACE FUNCTION recurse_children(post_id INTEGER) RETURNS integer[] AS $$
        WITH RECURSIVE recursetree(id, parent_id) AS (
            SELECT id, parent_id FROM comments WHERE parent_id = post_id
            UNION
            SELECT t.id, t.parent_id FROM comments t
            JOIN recursetree rt ON rt.id = t.parent_id
        )
        SELECT array(SELECT id FROM recursetree);
        $$ LANGUAGE SQL;""")
    #sesh.execute("UPDATE posts SET all_kids=recurse_children(id) WHERE is_ask=true OR is_tell=true OR is_show=true;")
    sesh.execute("COMMIT")
    sesh.execute("BEGIN")
    # NOTE(review): the string below is dead code kept as documentation
    # of the ORM equivalent of the commented-out UPDATE above.
    """sesh.query(Story) \
        .filter(or_(Story.is_ask == True, Story.is_tell == True, Story.is_show == True))\
        .filter(Story.all_kids == None)\
        .update({"all_kids": func.recurse_children(Story.id)}, synchronize_session=False)"""
    print("Done")
    for post_type, info in queries.items():
        query_args = info["query"]
        # Titles of real stories of this type feed the title model.
        title_query = sesh.query(Story.title).filter_by(**query_args).order_by(func.random())
        if post_type == "normal":
            title_query = title_query.filter(Story.score > 4)
        our_posts = []
        print("Generating title for post type {type}".format(type=post_type))
        title_sim = train_from_query(title_query, SubmissionTitleSim)
        with Timer() as t:
            for i in range(info["count"]):
                chosen_title = title_sim.make_sentence(tries=10000, max_overlap_total=10, max_overlap_ratio=0.5)
                # Borrow a real (host, author) pair for plausibility.
                domain, user = sesh.query(Story.host, Story.by).filter_by(**query_args).order_by(func.random()).limit(
                    1).one()
                votes = random.randint(1, 250)
                print(chosen_title + " ({domain})".format(domain=domain))
                print("[{votes}] By {by}".format(votes=votes, by=user))
                print()
                our_posts.append({
                    "id": len(created_posts) + len(our_posts),
                    "title": chosen_title,
                    "host": domain,
                    "by": user,
                    "votes": votes,
                    "comments": [],
                    "type": post_type
                })
        del title_sim
        print("Chosen title in {time:6.4f}".format(time=t.elapsed))
        # Generate comment threads in parallel, one task per post,
        # capped at 5 worker processes.
        comments = [post_type] * len(our_posts)
        pool = multiprocessing.Pool(processes=len(comments) if
                                    len(comments) < 5 else 5)
        comment_data = pool.map(make_comment, comments)
        for i, comments in enumerate(comment_data):
            post = our_posts[i]
            print("Made {0} comments".format(len(comments)))
            # Random-walk the indent level to fake threaded replies.
            last_indent = None
            while comments:
                comm = comments.pop()
                choice = random.randint(0, 10)
                if last_indent is None:
                    last_indent = 0
                elif choice < 1:
                    # Reset
                    last_indent = 0
                elif choice < 4:
                    # Stay where we are
                    pass
                elif choice < 9:
                    # Indent
                    last_indent += 1
                else:
                    if last_indent:
                        # Dedent
                        last_indent -= 1
                comm["indent"] = last_indent
                post["comments"].append(comm)
        created_posts.extend(our_posts)
        print("-" * 30)
    with open("data/posts.json", "w") as fd:
        json.dump(created_posts, fd)
def selectLocation(self, requirements, preferences, constraints):
    """Select all the locations based on the requirements, preferences and constraints

    Args:
        requirements: the Requirements namedtuple which represents the itinerary requirements
        preferences: the Preferences namedtuple which represents the preferences requirements
        constraints: the Constraints namedtuple which represents the constraints requirements
    Returns:
        Tuple containing the list of all the locations to be inserted in the slots,
        the meal locations and the evening locations
        ([morning/afternoon Locations], [meal Locations], [evening Locations])
    Raises:
        -
    """
    # Number of slots to fill: morning, afternoon, meal and evening for every day.
    nSlots = requirements.days * 4

    # 1/6 of the slots must be kid-friendly locations.
    # BUGFIX: use floor division -- plain '/' yields a float on Python 3, which
    # would crash range(0, nSlots) and limit(nKids) below (same result on Py2).
    nKids = 0
    if requirements.kids:
        nKids = nSlots // 6
        nSlots = nSlots - nKids

    # If the user requires free time or is young, leave one slot per day unscheduled.
    if requirements.freeTime or requirements.client.category == "young":
        nSlots = nSlots - requirements.days

    # Probabilities of picking: ['shopping', 'culture', 'gastronomy', 'nightlife']
    probabilities = self.__calculateProbabilities(preferences)

    # Decide how many locations of each category have to be selected.
    locationTypes = [0, 0, 0, 0]
    pickTypes = [0, 1, 2, 3]
    for i in range(0, nSlots):
        # Cap gastronomy (meals) at one per day; when the cap is reached,
        # redistribute its probability mass over the remaining categories.
        # BUGFIX: only reweight once, on the transition -- the original
        # re-applied the *3/4 scaling on every subsequent iteration,
        # progressively skewing the distribution towards nightlife.
        if locationTypes[2] == requirements.days and len(pickTypes) == 4:
            pickTypes = [0, 1, 3]
            probabilities = [(probabilities[0]*3)/4, (probabilities[1]*3)/4, 1-probabilities[0]-probabilities[1]]
        randomNumber = self.__random_pick(pickTypes, probabilities)
        locationTypes[randomNumber] = locationTypes[randomNumber] + 1

    # Based on the previous calculations, the number of locations per category is
    # now known. Build the queries (application of the knowledge rules).
    i = 0
    locations = []
    meals = []
    evening = []
    categoryMapping = {
        'culture': ('cultural', 'museum', 'historical'),
        'shopping': ('shopping',),
        'gastronomy': ('gastronomy',),
        'nightlife': ('entertainment', 'amusement', 'performance')
    }

    # Consider the locations to be included (constraints).
    if len(constraints.include) > 0:
        q1 = Location.query.filter((Location.excludedCategory==None) | (Location.excludedCategory!=requirements.client.category))
        q1 = q1.filter(Location.name.in_(constraints.include))
        q1 = q1.order_by(Location.rating).order_by(func.random()).limit(len(constraints.include))
        locations.extend(q1.all())

    # Choose other locations from the database, based on preferences,
    # requirements and constraints (the 'exclude' part).
    for preferenceType in preferences._fields:
        if locationTypes[i] > 0:
            q1 = Location.query.filter(Location.category.in_(categoryMapping[preferenceType]))\
                .filter((Location.excludedCategory==None) | (Location.excludedCategory!=requirements.client.category))
            if not preferenceType == "gastronomy":
                # Non-meal slots must never pick a gastronomy location.
                q1 = q1.filter(Location.category!='gastronomy')
            if requirements.client.quiet or requirements.client.category == "elderly":
                q1 = q1.filter_by(intensive=False)
            if len(constraints.exclude) > 0:
                q1 = q1.filter(~Location.name.in_(constraints.exclude))
            q1 = q1.order_by(Location.rating).order_by(func.random()).limit(locationTypes[i])
            if preferenceType == "nightlife":
                evening.extend(q1.all())
            elif not preferenceType == "gastronomy":
                locations.extend(q1.all())
            else:
                meals.extend(q1.all())
        i = i + 1

    # Consider the presence of kids (requirements).
    if nKids > 0:
        IDsToExclude = [location.ID for location in locations]
        q1 = Location.query.filter((Location.excludedCategory==None) | (Location.excludedCategory!=requirements.client.category))
        q1 = q1.filter_by(forKids=True)
        if len(constraints.exclude) > 0:
            # BUGFIX: constraints.exclude holds location *names* (see the
            # filters above) -- the original compared them against Location.ID,
            # so the exclusion never matched.
            q1 = q1.filter(~Location.name.in_(constraints.exclude))
        # Eliminate the possibility of choosing the same location twice,
        # given the presence of kids.
        q1 = q1.filter(~Location.ID.in_(IDsToExclude))
        q1 = q1.order_by(Location.rating).order_by(func.random()).limit(nKids)
        locations.extend(q1.all())

    return (locations, meals, evening)
def get(self): return self.db.query().filter(*self.filters).order_by(func.random()).first()
def updateAllocation(self, settings, question_cap=DEFAULT_QUESTION_CAP):
    """Refresh this student's active question allocations for self.dbLec.

    Deactivates stale/removed allocations, trims each allocation type down to
    its question cap, optionally reallocates the least-suitable tenth (when
    self.reAllocQuestions is set), then tops each type back up with randomly
    chosen questions. Generator: yields (question URL, allocation type,
    db.Question) for every allocation left active.

    NOTE(review): Python 2 code -- relies on `xrange` and integer `/` division
    (e.g. the `/ 2` cap halving and `/ 10` tenth below).
    """
    # Get all existing allocations from the DB and their questions
    allocsByType = dict()
    # hist_sel is the probability of serving historical questions; it decides
    # which allocation-type buckets we maintain at all.
    hist_sel = float(settings.get('hist_sel', '0'))
    if hist_sel > 0.001:
        allocsByType['historical'] = []
        # Only get half the question cap if there's not much chance of the questions being used
        if hist_sel < 0.5 and 'question_cap_historical' not in settings:
            settings['question_cap_historical'] = int(settings.get('question_cap', DEFAULT_QUESTION_CAP)) / 2
    if hist_sel < 0.999:
        # NB: Need to add rows for each distinct question type, otherwise won't try and assign them
        allocsByType['regular'] = []
        allocsByType['template'] = []
    # Fetch all existing allocations, divide by allocType
    for (dbAlloc, dbQn) in (Session.query(db.Allocation, db.Question)
            .join(db.Question)
            .filter(db.Allocation.studentId == self.student.studentId)
            .filter(db.Allocation.active == True)
            .filter(db.Allocation.lectureId == self.dbLec.lectureId)):
        if not(dbQn.active) or (dbAlloc.allocationTime < dbQn.lastUpdate):
            # Question has been removed or is stale
            dbAlloc.active = False
        else:
            # Still around, so save it
            if (dbAlloc.allocType or dbQn.defAllocType) in allocsByType:
                # NB: If hist_sel has changed, we might not want some types any more
                allocsByType[dbAlloc.allocType or dbQn.defAllocType].append(dict(alloc=dbAlloc, question=dbQn))
    # Each question type should have at most question_cap questions
    for (allocType, allocs) in allocsByType.items():
        # Per-type cap overrides the generic one when present.
        questionCap = int(settings.get('question_cap_' + allocType, settings.get('question_cap', DEFAULT_QUESTION_CAP)))
        # If there's too many allocs, throw some away (random victims,
        # deleted in reverse index order so earlier indices stay valid)
        for i in sorted(random.sample(xrange(len(allocs)), max(len(allocs) - questionCap, 0)), reverse=True):
            allocs[i]['alloc'].active = False
            del allocs[i]
        # If there's questions to spare, and requested to do so, reallocate questions
        if len(allocs) == questionCap and self.reAllocQuestions:
            if self.targetDifficulty is None:
                raise ValueError("Must have a target difficulty to know what to remove")
            # Make ranking how likely questions are, based on targetDifficulty
            suitability = []
            for a in allocs:
                if a['question'].timesAnswered == 0:
                    # New questions should be added regardless
                    suitability.append(1)
                else:
                    # 1 minus distance between the question's observed
                    # success rate and the target difficulty.
                    suitability.append(1 - abs(self.targetDifficulty - float(a['question'].timesCorrect) / a['question'].timesAnswered))
            ranking = sorted(range(len(allocs)), key=lambda k: suitability[k])
            # Remove the least likely tenth
            for i in sorted(ranking[0:len(allocs) / 10 + 1], reverse=True):
                allocs[i]['alloc'].active = False
                del allocs[i]
        # Assign required questions randomly
        if len(allocs) < questionCap:
            query = Session.query(db.Question).filter_by(qnType='tw_questiontemplate' if allocType == 'template' else 'tw_latexquestion').filter_by(active=True)
            if allocType == 'historical':
                # Get questions from lectures "before" the current one
                # (siblings under the same parent path, ordered by plonePath)
                targetQuestions = (Session.query(db.LectureQuestion.questionId)
                    .join(db.Lecture)
                    .filter(db.Lecture.plonePath.startswith(re.sub(r'/[^/]+/?$', '/', self.dbLec.plonePath)))
                    .filter(db.Lecture.plonePath < self.dbLec.plonePath)
                    .subquery())
                query = query.filter(db.Question.questionId.in_(targetQuestions))
            else:
                # Get questions from current lecture
                query = query.filter(db.Question.lectures.contains(self.dbLec))
            # Filter out anything already allocated
            allocIds = [a['alloc'].questionId for a in allocs]
            if len(allocIds) > 0:
                query = query.filter(~db.Question.questionId.in_(allocIds))
            # Give a target difficulty: prefer questions whose historical
            # correct-rate (scaled to 0..50) is closest to the target.
            if self.targetDifficulty is not None:
                query = query.order_by(func.abs(round(self.targetDifficulty * 50) - func.round((50.0 * db.Question.timesCorrect) / db.Question.timesAnswered)))
            for dbQn in query.order_by(func.random()).limit(max(questionCap - len(allocs), 0)):
                dbAlloc = db.Allocation(
                    studentId=self.student.studentId,
                    questionId=dbQn.questionId,
                    lectureId=self.dbLec.lectureId,
                    allocationTime=datetime.datetime.utcnow(),
                    allocType='historical' if allocType == 'historical' else None,
                )
                Session.add(dbAlloc)
                allocs.append(dict(alloc=dbAlloc, question=dbQn, new=True))
    Session.flush()
    # Yield every allocation still active, old and newly created alike.
    for allocType, allocs in allocsByType.items():
        for a in allocs:
            yield (
                self._questionUrl(a['alloc'].publicId),
                allocType,
                a['question'],
            )
def random(cls): return Kitten.query.order_by(func.random()).first()
def get_random(): session = Session() return session.query(FeedItem).order_by(func.random()).first()