def count(id):
    logger.info(f'Adding task for id: {id}')
    session = Session()
    task = session.query(Tasks).filter_by(id=id).first()
    res = Results(address=task.address, words_count=0, http_status_code=0)
    try:
        scrpr = Scrapper(task.address)
    except Exception:
        scrpr = None
    if scrpr:
        err = scrpr.get_page()
        if not err:
            task.http_status_code, matches = scrpr.count_matches()
            task.task_status = 'FINISHED'
            res = Results(address=task.address, words_count=matches,
                          http_status_code=task.http_status_code)
        else:
            print(err)
    session.add(res)
    session.commit()
    logger.info(task)
    logger.info(res)
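# A minimal sketch (an assumption, not taken from the original project) of the
# SQLAlchemy models and session factory that count() above relies on. Only the
# columns the function actually touches are declared; the table names and the
# database URL are hypothetical.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Tasks(Base):
    __tablename__ = 'tasks'
    id = Column(Integer, primary_key=True)
    address = Column(String)
    task_status = Column(String)
    http_status_code = Column(Integer)

class Results(Base):
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True)
    address = Column(String)
    words_count = Column(Integer)
    http_status_code = Column(Integer)

engine = create_engine('sqlite:///tasks.db')  # hypothetical connection string
Session = sessionmaker(bind=engine)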
def before_request():
    g.db = connect_db()
    Slot_Conf.set_db(g.db)
    Job.set_db(g.db)
    TimeInfo.set_db(g.db)
    slotStartEnd.set_db(g.db)
    Projects.set_db(g.db)
    Results.set_db(g.db)
    ProjNames.set_db(g.db)
    SlotTimes.set_db(g.db)
    ProjectTimes.set_db(g.db)
def move(index: int):
    game_state = json.loads(redis_client.get('game_state').decode())
    moved = False
    if not game_state['win']:
        values = game_state['values']
        empty_index = values.index('')
        movable_indexes = {empty_index - 4, empty_index + 4}
        if (empty_index + 1) % 4:
            movable_indexes.add(empty_index + 1)
        if (empty_index - 1) % 4 != 3:
            movable_indexes.add(empty_index - 1)
        if index in movable_indexes:
            values[empty_index] = values[index]
            values[index] = ''
            game_state['values'] = values
            moved = True
            game_state['move_count'] += 1
            if values[-1] == '':
                last_value = values[0]
                win = True
                for value in values[1:-1]:
                    if value < last_value:
                        win = False
                        break
                    last_value = value
                game_state['win'] = win
                if win and DB_ACTIVE:
                    result = Results(move_count=game_state['move_count'])
                    db.session.add(result)
                    db.session.commit()
    game_state = json.dumps(game_state)
    if moved:
        redis_client.set('game_state', game_state)
    return game_state
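# A minimal sketch (an assumption, not part of the original app) of the
# 'game_state' document that move() above expects to find in Redis: a 4x4
# sliding puzzle stored as a flat list of 16 strings, with '' marking the
# empty square, plus a move counter and a win flag. Tile labels are
# zero-padded here so that the plain string comparison in the win check
# above orders them numerically.
import json

initial_state = {
    'values': [f'{n:02d}' for n in range(1, 16)] + [''],  # solved board, blank last
    'move_count': 0,
    'win': False,
}
# redis_client.set('game_state', json.dumps(initial_state))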
def upload():
    """Upload Zipped Source"""
    if 'file' in request.files:
        filen = request.files['file']
        _, extension = os.path.splitext(filen.filename.lower())
        # Check for Valid ZIP
        if (filen and filen.filename
                and extension in settings.UPLD_ALLOWED_EXTENSIONS
                and filen.mimetype in settings.UPLD_MIME):
            filename = secure_filename(filen.filename)
            # Make upload dir
            if not os.path.exists(settings.UPLOAD_FOLDER):
                os.makedirs(settings.UPLOAD_FOLDER)
            # Save file
            zip_file = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            filen.save(zip_file)
            # Get zip hash
            get_zip_hash = utils.gen_sha256_file(zip_file)
            # Check if already scanned
            res = Results.query.filter(Results.scan_hash == get_zip_hash)
            if not res.count():
                # App analysis dir
                app_dir = os.path.join(app.config['UPLOAD_FOLDER'], get_zip_hash + "/")
                # Make app analysis dir
                if not os.path.exists(app_dir):
                    os.makedirs(app_dir)
                # Unzip
                utils.unzip(zip_file, app_dir)
                # Do scan
                scan_results = general_code_analysis([app_dir])
                print "[INFO] Static Analysis Completed!"
                _, sha2_hashes, hash_of_sha2 = utils.gen_hashes([app_dir])
                tms = datetime.datetime.fromtimestamp(
                    time.time()).strftime('%Y-%m-%d %H:%M:%S')
                # Save Result
                print "[INFO] Saving Scan Results!"
                res_db = Results(
                    get_zip_hash,
                    [app_dir],
                    sha2_hashes,
                    hash_of_sha2,
                    scan_results['sec_issues'],
                    scan_results['good_finding'],
                    scan_results['missing_sec_header'],
                    scan_results['files'],
                    scan_results['total_count'],
                    scan_results['vuln_count'],
                    [],
                    [],
                    tms,
                )
                db_session.add(res_db)
                db_session.commit()
            return jsonify({
                "status": "success",
                "url": "result/" + get_zip_hash
            })
    return jsonify({"status": "error", "desc": "Upload Failed!"})
def handler(message):
    msg = pickle.loads(message.body)
    task_id = int(msg.get('task_id'))
    result = msg.get('result')
    task_item = Tasks.query.filter_by(id=task_id).first()
    result_item = Results(address=task_item.address,
                          words_count=result.get('words_count', None))
    try:
        status_code = int(result.get('status_code'))
        task_item.http_status = status_code
        result_item.http_status_code = status_code
    except (TypeError, ValueError):
        pass
    db.session.add(task_item)
    db.session.add(result_item)
    db.session.commit()
    print(task_item.address, result)
    return True
def create_question():
    if not session.get('admin'):
        abort(400)
    form = Question_CreateForm()
    if form.validate_on_submit():
        question = form.question.data
        select1 = form.select1.data
        select2 = form.select2.data
        select3 = form.select3.data
        select4 = form.select4.data
        if Questions.query.filter_by(id=1).first():
            result = Results(question=question, select1=select1, select2=select2,
                             select3=select3, select4=select4)
            db.session.add(result)
            que = Questions.query.filter_by(id=1).first()
            que.question = question
            que.select1 = select1
            que.select2 = select2
            que.select3 = select3
            que.select4 = select4
            db.session.add(que)
            flash('增加成功')
            return redirect(url_for('admin_coupon.question'))
        else:
            que = Questions(id=1, question=question, select1=select1, select2=select2,
                            select3=select3, select4=select4)
            result = Results(question=question, select1=select1, select2=select2,
                             select3=select3, select4=select4)
            db.session.add(que)
            db.session.add(result)
            flash('增加成功')
            return redirect(url_for('admin_coupon.question'))
    return render_template('create_question.html', form=form)
def dosearch(string, upage, total_hits):
    conn = ES('127.0.0.1:9200', timeout=3.5)  # Connect to ES
    fq_fy = TermQuery()
    fq_fy.add('fy', string)
    fq_mc = TermQuery()
    fq_mc.add('mc', string)
    fq_nr = TermQuery()
    fq_nr.add('nr', string)
    bq = query.BoolQuery(should=[fq_fy, fq_mc, fq_nr])
    # add highlight to the search key word
    h = HighLighter(['<font color="yellow">'], ['</font>'], fragment_size=100)
    page = int(upage.encode('utf-8'))
    if page < 1:
        page = 1
    s = Search(bq, highlight=h, start=(page - 1) * PAGE_SIZE, size=PAGE_SIZE, sort='gbrq')
    s.add_highlight("fy")
    s.add_highlight('mc')
    s.add_highlight('nr')
    results = conn.search(s, indices='law', doc_types='ws')
    # return the total hits by using a list object
    total_hits.append(results.total)
    # print total_hits[0]
    result_list = []
    for r in results:
        if r._meta.highlight.has_key("fy"):
            r['fy'] = r._meta.highlight[u"fy"][0]
        if r._meta.highlight.has_key('mc'):
            r['mc'] = r._meta.highlight[u'mc'][0]
        if r._meta.highlight.has_key('nr'):
            r['nr'] = r._meta.highlight[u'nr'][0]
        res = Results()
        res.mc = r['mc']
        res.fy = r['fy']
        res.link = r['link']
        res.lx = r['lx']
        res.gbrq = r['gbrq']
        res.nr = r['nr'][0:250]
        res.dp = r['dq']
        result_list.append(res)
    return result_list
def landingpage():
    if request.method == 'GET':
        return jsonify({'status': 'failure', 'message': 'Unsupported operation : GET'})
    order = landing(request.form['startTime'], request.form['endTime'],
                    request.form['budget'], request.form['location'],
                    request.form['preference'])
    order.User = get_current_user()
    loc = request.form['location']
    pref = request.form['preference']
    db.session.add(order)
    db.session.commit()
    plans = plan.grabdata(loc)
    result = plans['yelp'] + "::" + plans['eventbrite']['text']
    events = Results(result)
    events.user = get_current_user().username
    db.session.add(events)
    db.session.commit()
    return jsonify({'status': 'success', 'data': plans, 'id': events.id}), 201
def slotsResults(slot_name):
    # today = time.strftime("%Y-%m-%d")
    slots = Slot_Conf.view('statistics/Slots', key=today)
    projects = ProjNames.view('statistics/projectsInSlot', key=[today, slot_name])
    results = Results.view('statistics/slotsResults', key=[today, slot_name])
    platforms = []
    for s in slots:
        if s.slot == slot_name:
            platforms = s.platforms
    dopedict = {}
    for p in platforms:
        dopedict[p] = {}
        dopedict[p]['tests-result'] = []
        dopedict[p]['build-result'] = []
    for r in results:
        dopedict[r.platform][r.set].append(r.project)
    for n in projects:
        names = n.names
    keys = dict((n, k) for k, n in enumerate(names))
    for p in platforms:
        dopedict[p]['tests-missing'] = filter(
            lambda x: x not in dopedict[p]['tests-result'], names)
        dopedict[p]['build-missing'] = filter(
            lambda x: x not in dopedict[p]['build-result'], names)
    resultsdict = {}
    resultsdict['unstarted'] = []
    resultsdict['unfinished'] = []
    for p in platforms:
        resultsdict['unstarted'] += dopedict[p]['build-missing']
        resultsdict['unfinished'] += dopedict[p]['tests-missing']
    finished = sorted(set(names) - set(resultsdict['unfinished']), key=keys.get)
    unstarted = list(set(resultsdict['unstarted']))
    unfinished = list(set(resultsdict['unfinished']) - set(resultsdict['unstarted']))
    data = []
    data.append({
        "total": len(results),
        "projects": names,
        "slot": slot_name,
        "platforms": platforms,
        "dopedict": dopedict,
        "unstarted": unstarted,
        "unfinished": unfinished,
        "finished": finished
    })
    data = json.dumps(data)
    return data
def api_polls():
    if request.method == 'POST':
        # get the poll and save it in the database
        result = json.loads(request.data)
        print(result)
        print('session user id is: ' + str(session['user']))
        for key, value in result.items():
            r = Results(user_id=session['user'], skill=key, option=value)
            print(r.skill)
            db.session.add(r)
            db.session.commit()
            if not value:
                return jsonify({'error': 'value for {} is empty'.format(key)})
        if 'user' in session:
            session.pop('user')
        logout_user()
        flash('Thanks for completing the survey!')
        return jsonify({'message': 'Survey was completed successfully'})
    else:
        # query the db and return all the polls as json
        # polls = Topics.query.join(Polls).all()
        all_polls = [{
            'title': 'Which side is going to win the EPL this season',
            'options': [{'name': 'Arsenal', 'vote_count': None},
                        {'name': 'Spurs', 'vote_count': None}]
        }, {
            'title': 'Whos better liverpool or city',
            'options': [{'name': 'Liverpool FC', 'vote_count': None},
                        {'name': 'Manchester city', 'vote_count': None}]
        }]
        resp = jsonify(all_polls)
        print(resp)
        return resp
def slotResult(slot_name):
    slots = Slot_Conf.view('statistics/Slots', key=today)
    projects = ProjNames.view('statistics/projectsInSlot', key=[today, slot_name])
    results = Results.view('statistics/slotsResults', key=[today, slot_name])
    platforms = []
    for s in slots:
        if s.slot == slot_name:
            platforms = s.platforms
    for p in projects:
        names = p.names
    dopedict = {}
    for p in platforms:
        dopedict[p] = {}
        dopedict[p]['tests-result'] = []
        dopedict[p]['build-result'] = []
    data = {}
    for n in names:
        data[n] = {}
        for p in platforms:
            data[n][p] = {}
            data[n][p]['tests-result'] = {
                "started": today + "T" + "00:00:00",
                "completed": today + "T" + "00:00:00"
            }
            data[n][p]['build-result'] = {
                "started": today + "T" + "00:00:00",
                "completed": today + "T" + "00:00:00"
            }
    for r in results:
        data[r.project][r.platform][r.set] = {
            "started": r.started,
            "completed": r.completed
        }
    data = json.dumps(data)
    return data
def save_to_db(self, const):
    # store in db: iterate through the Extract objects in self.data and
    # generate the appropriate rows for the db
    if const == "search_term":
        s_db = Search(date=timezone.now(), term=self.data[0].search_term)
        print "Adding %s data into db." % s_db
        s_db.save()
        for q in self.data:
            print q
            # save data around the Search term for each Extract object in self.data
            # each Extract object has multiple links; get them all and associate them
            # with the created search term
            try:
                for url in q.job_urls:
                    l_db = Links(search=s_db, link=url)
                    l_db.save()
                # each Extract object has a single location; get it and associate it
                # with the search term
                if q.loc != "":
                    loc_db = Location(city=q.city, state=q.state)
                    loc_db.save()
                # each Extract object has a summary attribute that holds all the data;
                # trim the data pool to the parameters specified by the user and store
                # the data in a Results table associated with its Search row
                summary = q.pool_summary(pos=self.pos, with_filter=self.with_filter,
                                         lower=self.lower, with_bigrams=self.with_bigrams)
                data = summary[('Word', 'Word_Count', 'POS_Tag')]
                for tup in data:
                    w = str(tup[0])
                    c = tup[1]
                    try:
                        p = str(tup[2])
                    except IndexError:
                        p = ""
                    r_db = Results(search=s_db, location=loc_db, word=w, count=c,
                                   pos=p, is_bigram=self.with_bigrams)
                    r_db.save()
            except Exception:
                if q.loc != "":
                    loc_db = Location(city=q.city, state=q.state)
                    loc_db.save()
                r_db = Results(search=s_db, location=loc_db, word="N/A", count=0,
                               pos="", is_bigram=False)
                r_db.save()
def getvidDetails(ids):
    """Retrieve title and image from the YouTube API.

    Retrieves the title and thumbnail of the requested videos from the
    YouTube API, then stores the data in the database for later display
    to the user.

    Keyword Arguments:
    ids -- List of ids retrieved from the search in getvidId()
    """
    print('Inside getviddetails')
    key = DEVELOPER_KEY
    region = "IN"
    url = "https://www.googleapis.com/youtube/v3/videos?part=snippet&id={ids}&key={api_key}"
    r = requests.get(url.format(ids=",".join(ids), api_key=key))
    js = r.json()
    items = js["items"]
    for item in items:
        try:
            result = Results(vid_name=item["snippet"]["title"],
                             vid_img=item["snippet"]["thumbnails"]["high"]["url"],
                             vid_id=item["id"])
            print(type(result))
            dbresults.append(result)
            db.session.add(result)
            print('Added result to session')
            db.session.commit()
            print('DB Addition Success')
        except Exception as e:
            db.session.rollback()
            print("Unable to add item to database.")
            print(e)
        finally:
            db.session.close()
        yield item["id"], item["snippet"]["title"], item["snippet"]["thumbnails"]["high"]["url"]
def findTweets(request):
    if request.method == 'POST':
        searchQuery = request.POST.get('search')
        try:
            tmpModel = Results.objects.get(username=searchQuery)
        except:
            location = 'data/' + searchQuery + '_results.txt'
            results = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 0, 0, 0, 0],
                       [0, 0, 0, 0, 0, 0, 0, 0, 0]]
            try:
                topic = 0
                word = 0
                with open(location, 'r') as F:
                    for line in F:
                        if word < 9:
                            results[topic][word] = line.strip("\n")
                            word += 1
                        else:
                            word = 0
                            topic += 1
            except:
                try:
                    ts = 'data/' + searchQuery + '_tweets.csv'
                    with open(ts, 'r') as F:
                        pass
                except:
                    # RETRIEVING TWEETS
                    td.get_all_tweets(str(searchQuery))
                try:
                    location = 'data/' + searchQuery + '_cleaned.txt'
                    with open(location, 'r') as F:
                        pass
                except:
                    # CLEANING DATA WITH TURKISH NLP
                    nlpPath = 'basic/codes/NLPTurkish.py'
                    os.system("python3 " + " " + nlpPath + " " + searchQuery)
                # RUNNING SPARK
                sparkPath = '/Users/k/Spark/bin/spark-submit'
                scriptPath = 'basic/codes/simpleApp.py'
                os.system(sparkPath + " --master local[4] " + scriptPath + " " + searchQuery)
                print("Last seconds...")
                time.sleep(2)
                location = 'data/' + searchQuery + '_results.txt'
                results = [[0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0]]
                try:
                    topic = 0
                    word = 0
                    with open(location, 'r') as fp:
                        for line in fp:
                            if word < 9:
                                results[topic][word] = line.strip("\n")
                                word += 1
                            else:
                                word = 0
                                topic += 1
                except:
                    context = {}
                    return render(request, 'searchPage.html', context)
            # SAVING FINDINGS TO DATABASE
            tmpModel = Results(username=searchQuery)
            tmpModel.t1w1 = results[0][0]
            tmpModel.t1w2 = results[0][1]
            tmpModel.t1w3 = results[0][2]
            tmpModel.t1w4 = results[0][3]
            tmpModel.t1w5 = results[0][4]
            tmpModel.t1w6 = results[0][5]
            tmpModel.t1w7 = results[0][6]
            tmpModel.t2w1 = results[1][0]
            tmpModel.t2w2 = results[1][1]
            tmpModel.t2w3 = results[1][2]
            tmpModel.t2w4 = results[1][3]
            tmpModel.t2w5 = results[1][4]
            tmpModel.t2w6 = results[1][5]
            tmpModel.t2w7 = results[1][6]
            tmpModel.t3w1 = results[2][0]
            tmpModel.t3w2 = results[2][1]
            tmpModel.t3w3 = results[2][2]
            tmpModel.t3w4 = results[2][3]
            tmpModel.t3w5 = results[2][4]
            tmpModel.t3w6 = results[2][5]
            tmpModel.t3w7 = results[2][6]
            tmpModel.save()
        # RETURNING REQUESTED PAGE
        context = {"searchQuery": searchQuery, "tmpModel": tmpModel}
        return render(request, 'findTweets.html', context)
def load_tournament_file():
    path = os.environ['APP_PATH']
    with open(path + '/src/jsons/tournaments.json') as f:
        data = json.load(f)

    # casino cache so not to request the same casinos repeatedly
    path_cache = os.environ['APP_PATH'] + '/src/jsons/casinos.json'
    if os.path.exists(path_cache):
        with open(path_cache) as f:
            cache = json.load(f)
    else:
        cache = {}

    for r in data:
        # Do not add these to Swap Profit
        if r['Tournament'].strip() == '' or \
                'satelite' in r['Tournament'].lower() or \
                r['Results Link'] == False:
            continue

        trmnt = Tournaments.query.get(r['Tournament ID'])
        trmnt_name, flight_day = utils.resolve_name_day(r['Tournament'])
        start_at = datetime.strptime(r['Date'][:10] + r['Time'], '%Y-%m-%d%H:%M:%S')

        trmntjson = {
            'id': r['Tournament ID'],
            'name': trmnt_name,
            'start_at': start_at,
            'results_link': str(r['Results Link']).strip()
        }
        flightjson = {
            'start_at': start_at,
            'day': flight_day
        }

        if trmnt is None:
            casino = cache.get(r['Casino ID'])
            print("THIS CASINO", f'{casino}')
            trmntjson = {
                **trmntjson,
                'casino': casino['name'],
                'address': casino['address'].strip(),
                'city': casino['city'].strip(),
                'state': casino['state'].strip(),
                'zip_code': str(casino['zip_code']).strip(),
                'longitude': float(casino['longitude']),
                'latitude': float(casino['latitude'])
            }
            # Create tournament
            trmnt = Tournaments(**trmntjson)
            db.session.add(trmnt)
            db.session.flush()
            # Create flight
            db.session.add(Flights(tournament_id=trmnt.id, **flightjson))
        else:
            # Create flight
            db.session.add(Flights(tournament_id=trmnt.id, **flightjson))

    db.session.commit()
    return True

    # NOTE: everything below the early return above appears to belong to a
    # separate results-upload routine whose "def" line is missing in the
    # source; it is unreachable here and kept as found.
    '''
    {
        "api_token": 1,
        "tournament_id": 45,
        "tournament_buyin": 150,
        "users": {
            "*****@*****.**": {
                "place": 11,
                "winnings": 200
            }
        }
    }
    '''
    trmnt_data = {}
    print('hello')
    for index, r in df.iterrows():
        # print('r', r)
        # Get the trmnt data that's in the first row
        if index == 0:
            # Check trmnt existance
            trmnt = Tournaments.query.get(r['Tournament ID'])
            print('trmnt.buy_in', trmnt.buy_in)
            if trmnt is None:
                return None, {'error': 'This tournament ID was not found: ' + str(r['Tournament ID'])}
            print('tournament', trmnt)
            trmnt.results_link = (os.environ['API_HOST'] +
                                  '/results/tournament/' + str(r['Tournament ID']))
            # Check to see if file was uploaded already
            entry = Results.query.filter_by(tournament_id=r['Tournament ID']).first()
            if entry is not None:
                return None, {'error': 'This tournament ID has already been uploaded: ' + str(trmnt.id)}
            # Swap Profit JSON
            trmnt_data = {
                'api_token': utils.sha256(os.environ['API_TOKEN']),
                'tournament_id': trmnt.id,
                'tournament_buyin': trmnt.buy_in,
                'users': {}
            }
        user_id = r['User ID'] or None
        # Add user to the Swap Profit JSON
        if user_id:
            user = Users.query.get(user_id)
            if user is None:
                db.session.rollback()
                return None, {'error': 'Couldn\'t find user with ID: ' + str(user_id)}
            # Swap Profit JSON
            trmnt_data['users'][user.email] = {
                'place': r['Place'],
                'winnings': r['Winnings']
                # 'user_id': user.id
            }
        # Add to PokerSociety database
        db.session.add(Results(
            tournament_id=trmnt_data['tournament_id'],
            user_id=user_id,
            full_name=r['Full Name'],
            place=r['Place'],
            nationality=r['Nationality'],
            winnings=r['Winnings']
        ))
    # If no errors, commit all data
    db.session.commit()
    # swapprofit = Subscribers.query.filter_by(company_name='Swap Profit').first()
    # if swapprofit is None:
    #     return 'Swap Profit not a subscriber'
    # resp = requests.post(
    #     os.environ['SWAPPROFIT_API_HOST'] + '/results/update',
    #     json=trmnt_data)
    # print('resp', resp)
    return trmnt_data, {'message': 'Results excel processed successfully'}
def result_post_method(request):
    """
    Point to post data from quiz into results table in dictionary.

    Comes in as (sample of 3 mats, 1 question):
    <QueryDict: {
        u'overall': [u'{"A": 0,
                        "node_data": {
                            "1001": {"buttonC": "0", "buttonB": "0", "buttonA": "1", "buttonD": "0"},
                            "1002": {"buttonC": "0", "buttonB": "0", "buttonA": "0", "buttonD": "1"},
                            "1003": {"buttonC": "0", "buttonB": "1", "buttonA": "0", "buttonD": "0"}},
                        "B": 2, "C": 3, "D": 8}'],
        u'question0': [u'{"1001": {"buttonC": "0", "buttonB": "0", "buttonA": "1", "buttonD": "0"},
                          "1002": {"buttonC": "0", "buttonB": "0", "buttonA": "0", "buttonD": "1"},
                          "1003": {"buttonC": "0", "buttonB": "1", "buttonA": "0", "buttonD": "0"}}']
    }>

    Then processes the data into manageable pieces to be cross referenced with
    the correct answers in the quiz dictionaries. The device ids that are
    attached to each mat are also cross referenced to whatever student is
    linked to that device, and given a score in results.

    :param request: wsgi request
    :return: HTTP response
    """
    quizname = request.GET.get('quizname')
    devices = Device.objects.all()
    quiz_obj = Quiz.objects.get(name=quizname)
    quizjson = json.loads(quiz_obj.quizjson)
    overall_dict = results_process_questions(quizjson)
    post_dict = request.POST
    results_dict = results_process_data(post_dict, overall_dict, devices)
    for device in devices:
        score = 0
        for result in results_dict:
            if device.student.name in result['student']:
                score += int(result['score'])
        try:
            result = Results()
            result.quiz = quiz_obj
            result.student = device.student
            result.score = score
            result.save()
        except Exception:
            return HttpResponse("failure")
    return HttpResponse("success")
def write_to_db(name, value):
    result = Results(name=name, value=value)
    db.add(result)
def update_events(self, date_list):
    """
    For all promotions in the database, search for new results and add to DB

    Parameters
    ----------
    date_list : list
        List of dates to retrieve results for

    Returns
    -------
    updated_shows : list
        Simple list of updated shows for use in notifications
    """
    logging.info("Updating events")
    # Build updated_shows list used later for notifications
    updated_shows = []
    logging.debug(Promotions.objects())
    # Get list of promotions from database
    for promotion in Promotions.objects():
        logging.info(f"Finding Events for {promotion.name}")
        logging.debug(f"Creating Promotion object for {promotion['name']}")
        # For each promotion, build a list of events
        events = self.get_events(promotion, date_list)
        if events:
            # Continue only if there are any events found for the promotion in the time frame
            logging.info(f"Found events for {promotion.name}")
            for event in events:
                # For each event, add the promotion name to its attributes
                event['promotion'] = promotion.name
                # Check whether the show already exists, based on the event name and date
                if not Results.objects(title=event['title'], date=event['date']):
                    # If it doesn't already exist, save it to the db and add to the list of updated shows
                    db_show = Results(**event).save()
                    logging.info(
                        f"Saved document ID {db_show.id} for {event['promotion']}, "
                        f"{event['title']}, {event['date']}")
                    updated_shows.append(event['promotion'] + " - " + event['title'])
                else:
                    # If show is already in the db, update the details
                    update = Results.objects(
                        title=event['title'], date=event['date']).update(**event, full_result=True)
                    if update.modified_count > 0:
                        logging.info(
                            f"Updated DB entry for {event['promotion']}, {event['title']}, {event['date']}")
                    else:
                        logging.info(
                            f"DB entry exists for {event['promotion']}, {event['title']}, {event['date']}")
        else:
            logging.info(f"No events found for {promotion.name}")
    # Create string of updated shows for notifications
    updated_shows = '\n'.join(updated_shows)
    return updated_shows
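# A minimal sketch (an assumption, not taken from the original project) of the
# MongoEngine documents that update_events() above queries. Only the fields the
# method touches (name; title, date, promotion) are declared; DynamicDocument
# is used so any extra keys returned by get_events() are accepted on save/update.
import mongoengine as me

class Promotions(me.Document):
    name = me.StringField(required=True)

class Results(me.DynamicDocument):
    title = me.StringField(required=True)
    date = me.DateTimeField()
    promotion = me.StringField()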
def results(request, query_id):
    """
    :param request:
    :param query_id:
    :return:
    :raise:
    """
    if request.user.is_authenticated():
        reponseToPresent = []
        categories_counter = []
        positive_counter = 0
        negative_counter = 0
        neutral_counter = 0
        try:
            ## Must store the response if there is no response, otherwise return the stored one.
            ## IF NOT STORED
            query = Query.objects.get(id=query_id)
            query_params = Query_properties.objects.filter(query=query)
            results = Results.objects.filter(query=query)
            # run for all categories
            list_properties = get_query_properties(query)
            properties = list_properties["Properties"]  # all the available properties, e.g. keywords, twitter, facebook
            # print "properties: %s" % properties
            phrases = list_properties["Phrases"]
            # print "phrases: %s" % phrases
            keywords = list_properties["Keywords"]
            # print "keywords: %s" % keywords
            twitter_usernames = list_properties["Twitter"]
            facebook_pages = list_properties["Facebook"]
            query_properties = ''  # This is the string that forms the properties query (query_string)
            phrase_properties = ''  # This is the string that forms the phrase query (match_phrase)
            twitter_properties = ''
            facebook_properties = ''
            ## Run the query or bring the results from the Database
            if results:
                # bring it from the database
                response = results.__getitem__(0).results
                response = json.loads(response)
            else:
                # make a new query
                lang = Query_languages.objects.get(query=query_id)
                #####
                # Get all the properties, keywords, phrases, twitter usernames
                #####
                for kwrd in keywords.keys():
                    temp = ''
                    for keyword_prop in keywords[kwrd]:
                        temp += "%s," % keyword_prop
                    if query.venn == 'OR':
                        query_properties += '%s,' % remove_comma_at_the_end(temp)
                    else:
                        query_properties += '+(%s)' % remove_comma_at_the_end(temp)
                query_properties = query_properties.replace('+()', '')  # Remove any empty keyword
                query_properties = remove_comma_at_the_end(query_properties)
                if query_properties != '':  # if empty list, no properties, no query string, go to phrases
                    if lang:
                        if lang.language == "es":
                            query_properties = '{"query_string":{"query":"%s","fields":["%s"]}}' % (
                                query_properties, "text_no_url_es")
                        elif lang.language == "en":
                            query_properties = '{"query_string":{"query":"%s","fields":["%s"]}}' % (
                                query_properties, "text_no_url")
                        else:
                            query_properties = '{"query_string":{"query":"%s","fields":["%s","%s"]}}' % (
                                query_properties, "text_no_url", "text_no_url_es")
                    else:
                        query_properties = '{"query_string":{"query":"%s","fields":["%s"]}}' % (
                            query_properties, "text_no_url")
                # Create the phrase query
                for phrase_list in phrases.keys():
                    for phrase in phrases[phrase_list]:
                        if lang:
                            if lang.language == "es":
                                phrase_properties += '{"match_phrase":{"doc.text_no_url_es":"%s"}},' % phrase
                            elif lang.language == "en":
                                phrase_properties += '{"match_phrase":{"doc.text_no_url":"%s"}},' % phrase
                            else:
                                phrase_properties += '{"match_phrase":{"doc.text_no_url":"%s"}},{"match_phrase":{"doc.text_no_url_es":"%s"}},' % (
                                    phrase, phrase)
                        else:
                            phrase_properties += '{"match_phrase":{"doc.text_no_url":"%s"}},' % phrase
                phrase_properties = remove_comma_at_the_end(phrase_properties)
                for twitter_username in twitter_usernames:
                    twitter_properties += '{"match_phrase_prefix" : { "doc.user_screen_name":"twitter:%s" }},' % \
                        twitter_username.replace(" ", "").replace("@", "")
                twitter_properties = remove_comma_at_the_end(twitter_properties)
                for facebook_page in facebook_pages:
                    facebook_properties += '{"match_phrase_prefix" : { "doc.user_screen_name":"facebook:%s" }},' % \
                        facebook_page.replace(" ", "")
                facebook_properties = remove_comma_at_the_end(facebook_properties)
                ###
                # query constructor
                ###
                query_all = ''
                if query_properties != '':
                    query_all += '%s,' % query_properties
                if phrase_properties != '':
                    query_all += '%s,' % phrase_properties
                if twitter_properties != '':
                    query_all += '%s,' % twitter_properties
                if facebook_properties != '':
                    query_all += '%s,' % facebook_properties
                query_all = remove_comma_at_the_end(query_all)
                query_all = '{"query":{"filtered":{"query":{"bool":{"should":[%s],"minimum_should_match" : 1}},"filter":{"bool":{"must":[{"range":{"doc.created_at":{"from":"%s","to":"%s"}}}],"_cache":true}}}},"from":0,"size":10000, "sort":["_score"]}' % (
                    query_all,
                    int(time.mktime(query.from_date.timetuple()) * 1000),
                    int(time.mktime(query.to_date.timetuple()) * 1000))
                print query_all
                response = parse_query_for_sentiments(query_all)
                newResponse = Results(query=query, results=json.dumps(response), updated=datetime.now())
                newResponse.save()
            ## count the occurrences of keywords in the response
            for property in properties.keys():
                word_counter = []
                r = re.compile("|".join(r"\b%s\b" % w.lower() for w in properties[property].split(",")), re.I)
                # temporary solution to double counting...
                number = Counter(re.findall(r, ""))
                for message in response:
                    # dict_you_want = {"text": message["_source"]["doc"]["text"]}
                    # print dict_you_want
                    number = number + Counter(re.findall(
                        r, (message["_source"]["doc"]["text"]).lower().replace("@", " ").replace("#", " ")))
                # for lala in properties[property].split(","):
                #     print number[lala]
                #     print lala
                for phrase in properties[property].split(","):
                    # number = json.dumps(response).count(phrase)
                    text = '{"name":"%s","times":%i, "sentiment":%i, "positive":%i, "negative":%i, "neutral":%i}' % (
                        phrase.lower(), number[phrase.lower()], 0, 0, 0, 0)
                    # print text
                    word_counter.append(json.loads(text))
                text = {}
                text["category"] = property
                text["properties"] = word_counter
                categories_counter.append(text)
            for message in response:
                doc_text = message["_source"]["doc"]["text"]
                if message["_source"]["doc"]["senti_tag"] == "positive":
                    # for pie diagram metrics
                    positive_counter += 1
                elif message["_source"]["doc"]["senti_tag"] == "negative":
                    # for pie diagram metrics
                    negative_counter += 1
                elif message["_source"]["doc"]["senti_tag"] == "neutral":
                    neutral_counter += 1
                # if message["_score"] > 0.05:
                if True:
                    reponseToPresent.append(message["_source"])
                    ## print "Just Added: %s" % message["_source"]["doc"]
                    try:
                        for category in categories_counter:
                            r2 = re.compile("|".join(r"\b%s\b" % w["name"].lower() for w in category["properties"]), re.I)
                            number2 = Counter(re.findall(
                                r2, (json.dumps(message["_source"]["doc"]["text"])).lower().replace("@", " ").replace("#", " ")))
                            if True:
                                for property in category["properties"]:
                                    if message["_source"]["doc"]["senti_tag"] == "positive":
                                        if (number2[property["name"].lower()]) > 0:
                                            property["sentiment"] = property["sentiment"] + 1
                                            property["positive"] = property["positive"] + 1
                                    elif message["_source"]["doc"]["senti_tag"] == "negative":
                                        if (number2[property["name"].lower()]) > 0:
                                            property["sentiment"] = int(property["sentiment"]) - 1
                                            property["negative"] = property["negative"] + 1
                                    elif message["_source"]["doc"]["senti_tag"] == "neutral":
                                        if (number2[property["name"].lower()]) > 0:
                                            property["neutral"] = property["neutral"] + 1
                    except:
                        continue
        except ValueError:
            # print ValueError.message
            raise Http404()
        return render(request, "results.html",
                      {"query_id": query.id, "query_name": query.name, "query": query_params,
                       "response": reponseToPresent, "positive": positive_counter,
                       "negative": negative_counter, "neutral": neutral_counter,
                       "categories": categories_counter})
    else:
        return HttpResponseRedirect("/")
first_number = False
while not first_number:
    my_string = input()
    if parse_expression(my_string):
        first_number, second_number, action = parse_expression(my_string)
        break
    else:
        continue

our_example = Calculator(first_number, second_number, action)
if our_example.calculate():
    print("Your result is: ", round(our_example.calculate(), 4))
    to_alchemy_results = Results(first_number, action, second_number,
                                 our_example.calculate(), our_user_id)
    alchemy_actions.add_res(to_alchemy_results)
else:
    print("It's not possible to divide by zero!")

print("Do you want to continue? (y/n)")
while True:
    again = input()
    if again == "y":
        break
    elif again == "n":
        break
    else:
        print("Try again !")
def question():
    """Let the user answer the trivia question."""
    # "GET" method
    if request.method == "GET":
        # get trivia file from online API
        triviafile = getTrivia(Choice.query.get(1).choice)
        # create variables
        question, correct_answer, incorrect_answer1, incorrect_answer2, incorrect_answer3 = triviaItems(triviafile)
        # create shuffled variables for db
        answer1, answer2, answer3, answer4 = shuffle(correct_answer, incorrect_answer1,
                                                     incorrect_answer2, incorrect_answer3)
        # if table is empty, insert values
        if Results.query.get(1) is None:
            # store question and answers into database
            result = Results(question, answer1, answer2, answer3, answer4, correct_answer)
            db.session.add(result)
            db.session.commit()
        # update the table otherwise
        else:
            Results.query.get(1).question = question
            Results.query.get(1).answer1 = answer1
            Results.query.get(1).answer2 = answer2
            Results.query.get(1).answer3 = answer3
            Results.query.get(1).answer4 = answer4
            Results.query.get(1).correct_answer = correct_answer
            db.session.commit()
        # query for question and results
        trivia = Results.query.get(1)
        return render_template('question.html', trivia=trivia)
    # "POST" method
    else:
        # create dictionary with correct answer and incorrect answers
        answerdict = {"answer1": "incorrect", "answer2": "incorrect",
                      "answer3": "incorrect", "answer4": "incorrect"}
        if Results.query.get(1).correct_answer == Results.query.get(1).answer1:
            answerdict["answer1"] = "correct"
        elif Results.query.get(1).correct_answer == Results.query.get(1).answer2:
            answerdict["answer2"] = "correct"
        elif Results.query.get(1).correct_answer == Results.query.get(1).answer3:
            answerdict["answer3"] = "correct"
        elif Results.query.get(1).correct_answer == Results.query.get(1).answer4:
            answerdict["answer4"] = "correct"
        # if user is not logged in
        if session.get("user_id") is None:
            # correct answer
            if answerdict[request.form.get("answer")] == "correct":
                flash("Answer is correct!", "success")
                return redirect(url_for("proceed"))
            # incorrect answer
            else:
                flash("Answer is wrong!", "danger")
                return redirect(url_for("proceed"))
        # if user is logged in
        else:
            # correct answer
            if answerdict[request.form.get("answer")] == "correct":
                flash("Answer is correct! You have earned 10 points!", "success")
                # add 10 points to user's current score
                User.query.get(current_user.id).currentscore += 10
                # update high score if broken
                if User.query.get(current_user.id).currentscore > User.query.get(current_user.id).highscore:
                    User.query.get(current_user.id).highscore += 10
                db.session.commit()
                return redirect(url_for("proceed_online"))
            # incorrect answer
            else:
                flash("Answer is wrong! Your score has been reset to 0.", "danger")
                # reset current score to 0
                User.query.get(current_user.id).currentscore = 0
                db.session.commit()
                return redirect(url_for("proceed_online"))