def sortQBs():
    db = utils.connect_db('nfl', remove_existing=False)
    player_collection = db['players']
    rankedQB = db['QB']
    rankedQB.drop()
    for each in player_collection.find({"Position": 'QB'}):
        if "Passing" in each:
            passingTD = int(each['Passing']['TD'])
            passingYds = int(str(each['Passing']['Yds']).replace(",", ""))
            passingComp = int(each['Passing']['Comp'])
            passingPct = 10 * float(each['Passing']['Pct'])
            passingInt = int(each['Passing']['Int'])
            passingSck = int(each['Passing']['Sck'])
            passingTotal = (passingTD * 200) + (passingYds * 2) + (passingComp * 10) + \
                           (10 * (passingPct - 50)) - (passingInt * 200) - (passingSck * 25)
        else:
            passingTotal = 0
        if "Rushing" in each:
            rushingTD = each['Rushing']['TD']
            rushingFUM = each['Rushing']['FUM']
            rushingYds = each['Rushing']['Yds']
            rushingTotal = (rushingTD * 150) + (rushingYds * 2) - (rushingFUM * 150)
        else:
            rushingTotal = 0
        totalScore = passingTotal + rushingTotal
        each['Score'] = totalScore
        rankedQB.insert(each)
    for each in rankedQB.find().sort('Score', -1):
        print each['Name']
        print each['Score']
def __init__(self):
    self.sql = connect_db(DB_HOST)
    self.s3 = connect_s3()
    self.today = datetime.datetime.now().strftime('%Y%m%d')
    self.writer_normal = None
    self.writer_IT = None
    self.writer_name = None
def before_request():
    g.graph = maingraph
    g.db = connect_db()
    if session.get('logged_in', False):
        u = session.get('user')
        if u is not None:
            g.userid = u.get('openid')
def sortDLs():
    db = utils.connect_db('nfl', remove_existing=False)
    player_collection = db['players']
    rankedDL = db['DL']
    rankedDL.drop()
    for each in player_collection.find({"$or": [{"Position": 'NT'}, {"Position": 'DT'}, {"Position": 'DE'}]}):
        if "Defensive" in each:
            defenseSolo = int(each['Defensive']['Total'])
            defenseAst = int(each['Defensive']['Ast'])
            defenseSack = int(each['Defensive']['Sck'])
            defensePDef = int(each['Defensive']['PDef'])
            defenseInt = int(each['Defensive']['Int'])
            defenseTD = int(each['Defensive']['TDs'])
            defenseYds = int(each['Defensive']['Yds'])
            defenseTotal = (defenseSolo * 100) + (defenseAst * 50) + (defenseSack * 350) + \
                           (defensePDef * 150) + (defenseInt * 500) + (defenseTD * 500) + (defenseYds * 5)
        else:
            defenseTotal = 0
        if "Fumbles" in each:
            ForcedFumble = int(each['Fumbles']['FF'])
            FumbleTD = int(each['Fumbles']['TD'])
            fumbleTotal = (ForcedFumble * 500) + (FumbleTD * 500)
        else:
            fumbleTotal = 0
        totalScore = defenseTotal + fumbleTotal
        each['Score'] = totalScore
        rankedDL.insert(each)
    for each in rankedDL.find().sort('Score', -1):
        print each['Name']
        print each['Score']
def main():
    db, coll = connect_db()
    print(coll)
    print('Collection has {} documents'.format(coll.count()))
    map = """
    function() {
        var words = this.content;
        words.forEach(function(word){
            if(word.length > 3) {
                emit(word, 1);
            }
        });
    }
    """
    reduce = """
    function(key, values) {
        return Array.sum(values);
    }
    """
    # word count for all documents
    query = {}
    # filter documents
    # query = {'$or': [{'name': 'shakespeare-hamlet'}, {'name': 'shakespeare-macbeth'}]}
    coll.map_reduce(map, reduce, 'word_count', query=query)
    # Results stored in collection "word_count"
    result = db['word_count']
    cursor = result.find().sort('value', pymongo.DESCENDING)
    print('Word count for {} words'.format(cursor.count()))
    for elem in cursor.limit(25):
        print(elem)
def __init__(self, parent):
    super(PortStatus, self).__init__(parent)
    self.con = utils.connect_db()
    self.port = model.Port(self.con)
    self.vbox_layout = QVBoxLayout()
    self.vbox_layout.setSpacing(0)
    self.vbox_layout.setContentsMargins(10, 0, 10, 0)
    self.setLayout(self.vbox_layout)
    self.ship_views = []
    self.now_deck = 1
    self.deckselector = DeckSelector(self.con, self)
    self.deckselector.deck_selected.connect(self.on_deck_selected)
    self.vbox_layout.addWidget(self.deckselector)
    self.deckselector.show()
    for _ in range(6):
        ui = ShipStatus(self)
        self.ship_views.append(ui)
        self.vbox_layout.addWidget(ui)
        ui.hide()
    self.vbox_layout.addItem(QSpacerItem(40, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
def __init__(self):
    db = utils.connect_db('Two_Pick_Too_Drunk')
    collection = db['user']
    last_update_start = datetime.today()
    reviews = 'reviewer'
    clusters = 'reviewer_cluster'
    updated = (False, 1)
    recommenderer = Recommender()
    while 1:
        users = collection.find({"last_beer_update": {"$gte": last_update_start}})
        start_time = time.time()
        for user in users:
            print user['name']
            (results, result_set) = recommenderer.recommender(user["Beers_Rated"], reviews, clusters, db)
            collection.update({"_id": user["_id"]}, {"$set": {"Recommendations": results}})
            updated = (True, 1)
        end_time = time.time()
        if updated[0]:
            print 'done with updation after %.3f seconds' % (end_time - start_time)
        last_update_start = datetime.now()
        if updated[0]:
            time.sleep(updated[1] * 5)
            print 'Slept for ' + str(updated[1] * 10) + ' seconds'
            updated = (False, updated[1])
        else:
            if updated[1] < 30:
                updated = (False, updated[1] + 1)
                time.sleep(updated[1] * 10)
                print 'Slept for ' + str(updated[1] * 10) + ' seconds'
            else:
                time.sleep(updated[1] * 10)
                print 'Slept for ' + str(updated[1] * 10) + ' seconds'
def search_results(self, query):
    db = utils.connect_db('Two_Pick_Too_Drunk')
    tokens = self.tokenize(query)
    index = db['search'].find_one({"SearchIndex": "Beer"})['Index']
    self.term_idf = db['search'].find_one({"SearchIndex": "Beer"})['termIdf']
    id_sets = list()
    for token in tokens:
        if token in index:
            id_sets += index[token]
    id_sets = set(id_sets)
    ids = list(id_sets)
    if not id_sets or not all(id_sets):
        return []
    beers = Beer.find({"BeerId": {"$in": ids}})
    beerReturn = list()
    for x in beers:
        beerReturn.append({
            "Name": x.Name,
            "BeerId": x.BeerId,
            "Brewery": x.Brewery,
            "AverageRating": x.AverageRating,
            "vect": x.vect,
            "tokens": x.tokens})
    query_vect = self._normed_vect(tokens)
    for beer in beerReturn:
        beer['sim'] = sum(query_vect[k] * beer['vect'].get(k, 0) for k in query_vect)
    return heapq.nlargest(20, beerReturn, key=operator.itemgetter('sim'))
def before_request():
    g.db = utils.connect_db()
    g.user = None
    if 'uid' in session:
        g.user = utils.queryUser(session['uid'])
        if g.user["uenabled"] != 1:
            g.user = None
def main():
    db = utils.connect_db('Two_Pick_Too_Drunk')
    beer_collection = db['beer']
    ObannonsBeerList = OBD.keys()
    print 'Sorting Obannon\'s reviews'
    reviews = utils.read_beers()
    obannonsReviews = list()
    obannonsDict = dict()
    json = open('Reviews/ObannonsData.json', 'w')
    for review in reviews:
        if review['BeerId'] in ObannonsBeerList:
            obannonsReviews.append(review)
            s = ujson.dumps(review)
            json.write(s + '\n')
    json.close()
    reviewersorter = review_sorter('obannons_reviews')
    reviewersorter.sort_reviews(obannonsReviews)
    for review in obannonsReviews:
        if review['BeerId'] in obannonsDict:
            obannonsDict[review['BeerId']].append(review['Reviewer'])
        else:
            obannonsDict[review['BeerId']] = [review['Reviewer']]
    print 'Reviews per Obannons Beers'
    for beer in obannonsDict:
        Beer = beer_collection.find_one({"BeerId": beer})
        print 'Beer: ' + Beer['Brewery'] + ' ' + Beer['Name'] + '\nNumber of reviews: ' + str(len(obannonsDict[beer])) + '\n'
def main():
    db = utils.connect_db('Two_Pick_Too_Drunk')
    beer_collection = db['beer']
    beer_collection.remove()
    beer_rating = BeerAverage()
    print 'Loading Beers'
    beers = utils.read_beers()
    brewery = []
    beersCount = 0
    for beer in beers:
        beersCount += 1
        if beer['Brewery'] not in brewery:
            brewery.append(beer['Brewery'])
        if beer['BeerId'] in beer_rating:
            doc = {'Brewery': beer['Brewery'],
                   'BeerId': beer['BeerId'],
                   'Name': beer['Name'],
                   'BreweryId': beer['BreweryId'],
                   'AverageRating': beer_rating[beer['BeerId']]}
        else:
            doc = {'Brewery': beer['Brewery'],
                   'BeerId': beer['BeerId'],
                   'Name': beer['Name'],
                   'BreweryId': beer['BreweryId']}
        beer_collection.insert(doc)
    print str(beersCount) + ' Beers over ' + str(len(brewery)) + ' Breweries'
def receive_message():
    sender = request.values.get('From', None)
    db = connect_db('MONGOHQ_URL', 'MONGO_DATABASE_NAME')
    user = db.users.find_one({"number": sender})
    message = request.values.get('Body', None)
    resp = twilio.twiml.Response()
    if user is not None:
        try:
            email = user['email']
        except KeyError:
            is_valid = verify(message)
            if is_valid or is_valid is None:
                db.users.update_one({'_id': user['_id']}, {'$set': {'email': message}}, upsert=False)
                resp.message("Thank you! You've been added to the waitlist. As soon as we can work through our current orders, we'll be in touch!")
            else:
                resp.message("Please send us a valid email address!")
        else:
            resp.message("You are currently on the waitlist. We will contact you shortly!")
    else:
        # Respond to a first-time sender with a simple welcome text message.
        resp.message("Welcome to Green Street! To get started, please send us your email address. We will only use it to send you order confirmations, receipts, and updates.")
        db.users.insert({"number": sender})
    return str(resp)
def completed_tickets():
    db_connection = connect_db(APP)
    completed_tickets_cursor = db_connection.execute(
        'select * from tickets where status="Complete"')
    print completed_tickets_cursor
    completed_tickets_headers = get_header(APP, 'tickets')
    formatted_completed_tickets_headers = [
        format_headers(h) for h in completed_tickets_headers]
    completed_tickets = {}
    for name in completed_tickets_headers:
        completed_tickets[name] = []
    completed_tickets['url'] = []
    completed_tickets['delete'] = []
    for row in completed_tickets_cursor:
        for name, item in zip(completed_tickets_headers, row):
            completed_tickets[name].append(item)
        completed_tickets['url'].append('/ticket?ticket_number={}'.format(row[0]))
        completed_tickets['delete'].append(
            '/api/deleteticket?ticket_number={}'.format(row[0])
        )
    return render_template(
        'completedtickets.html',
        tickets=completed_tickets,
        completed_tickets=completed_tickets,
        completed_tickets_headers=completed_tickets_headers,
        formatted_completed_tickets_headers=formatted_completed_tickets_headers
    )
def post(self):
    args = self.request.arguments
    print args
    db = utils.connect_db('FridgeRaider')
    collection = db['User']
    u = User.search(email=args['email'][0].lower()).first()
    if u:
        error_msg = u"?error=" + tornado.escape.url_escape("Email already registered")
        self.redirect(u"/account/register" + error_msg)
    elif args['password1'][0] != args['password2'][0]:
        error_msg = u"?error=" + tornado.escape.url_escape("Passwords did not match")
        self.redirect(u"/account/register" + error_msg)
    else:
        user = User(
            displayName=args['Name'][0],
            email=args['email'][0],
            password=sha256_crypt.encrypt(args['password1'][0]),
            ingredients=[]
        )
        user.save()
        self.set_current_user(user)
        self.redirect("/account/home")
def deletemultitrack_api():
    """
    Deletes a multitrack from the multitracks table and adds an entry to ticket_history.

    Returns
    -------
    Redirects page to ticket info
    """
    db_connection = connect_db(APP)
    date_updated = strftime("%m-%d-%y %H:%M:%S", gmtime())
    ticket_number = request.args.get('ticket_number')
    multitrack_id = request.args.get('multitrack_id')
    ticket_revision_id_cursor = db_connection.execute(
        "select ticket_revision_id from ticket_history where ticket_number={}".format(ticket_number))
    ticket_revision_ids_strings = [t[0].split("-")[1] for t in ticket_revision_id_cursor]
    ticket_revision_ids = [int(t) for t in ticket_revision_ids_strings]
    if len(ticket_revision_ids_strings) == 0:
        ticket_revision_id = "{}-1".format(ticket_number)
    else:
        ticket_revision_number = numpy.max(ticket_revision_ids) + 1
        ticket_revision_id = "{}-{}".format(ticket_number, ticket_revision_number)
    temp_cursor = db_connection.execute(
        'select * from tickets where ticket_number={} order by date_updated asc'.format(ticket_number))
    temp_row = list(temp_cursor.next())
    ticket_history_cursor = db_connection.execute(
        'select * from ticket_history where ticket_number={} order by date_updated'.format(ticket_number))
    rows = [list(t) for t in ticket_history_cursor]
    row = rows[-1]
    db_connection.execute(
        'delete from multitracks where multitrack_id="{}"'.format(multitrack_id))
    num_multitracks_cursor = db_connection.execute(
        "select * from multitracks where ticket_number={}".format(ticket_number))
    num_multitracks_list = [t[0] for t in num_multitracks_cursor]
    num_multitracks = str(len(num_multitracks_list))
    row[1] = ticket_revision_id
    if date_updated != "":
        row[5] = date_updated
        db_connection.execute(
            'update tickets set date_updated = "{}" where ticket_number = {}'.format(date_updated, ticket_number))
    if num_multitracks != "":
        row[6] = str(num_multitracks)
        db_connection.execute(
            'update tickets set number_of_tracks = {} where ticket_number = {}'.format(num_multitracks, ticket_number))
    row = [str(r) if r != "" else u"" for r in row]
    insert_vals = '","'.join(row)
    db_connection.execute(
        'insert into ticket_history values("{}");'.format(insert_vals)
    )
    db_connection.commit()
    return redirect("/ticket?ticket_number={}".format(ticket_number))
def __init__(self):
    self.db = utils.connect_db("STOS", True)
    self.user_connections = defaultdict(set)
    self.graph = nx.DiGraph()
    self.user_to_id = defaultdict(int)
    self.id_to_user = defaultdict(str)
    self.auth = 0
    self.hub = 0
def initialize(cls):
    # call this function from the same thread that invokes dispatch()
    db.initialize()
    cls.debug_con = utils.connect_debug_db()
    cls.con = utils.connect_db()
    cls.tables = [r[0] for r in cls.con.execute('select name from sqlite_master where type="table";')]
    cls.table_cols = {t: get_cols(cls.con, t) for t in cls.tables}
def post(self):
    email = self.get_argument("email", "")
    password = self.get_argument("password", "")
    db = utils.connect_db('FridgeRaider')
    u = User.search(email=email.lower()).first()
    if sha256_crypt.verify(password, u.password):
        self.set_current_user(u)
        self.redirect("/account/home")
    else:
        error_msg = tornado.escape.url_escape("Invalid Login")
        self.redirect("/account/login?error=" + error_msg)
def show_ratings(self, _args, ObannonsBeerDict):
    args = list()
    for arg in _args:
        args.append({'BeerId': arg, 'Rating': float(_args[arg][0])})
    user_ratings = args
    db = utils.connect_db('Two_Pick_Too_Drunk')
    reviews = 'obannons_reviews'
    clusters = 'obannons_reviews_cluster'
    recommenderer = Recommender()
    (results, result_set) = recommenderer.recommender(user_ratings, reviews, clusters, db, 0, 15)
    self.render("ratings.html", OBD=ObannonsBeerDict, results=results, result_set=result_set)
def show_ratings(self, _args, ObannonsBeerDict):
    args = list()
    for arg in _args:
        args.append({"BeerId": arg, "Rating": float(_args[arg][0])})
    user_ratings = args
    db = utils.connect_db("Two_Pick_Too_Drunk")
    reviews = "obannons_reviews"
    clusters = "obannons_reviews_cluster"
    recommenderer = Recommender()
    (results, result_set) = recommenderer.recommender(user_ratings, reviews, clusters, db)
    self.render("ratings.html", OBD=ObannonsBeerDict, results=results, result_set=result_set)
def main():
    db = utils.connect_db('nfl', remove_existing=True)
    stats = read_json('./data/player_stats.json')
    # player stat collection
    player_collection = db['players']
    player_collection.ensure_index([('id', pymongo.ASCENDING),
                                    ('Name', pymongo.ASCENDING),
                                    ('Team', pymongo.ASCENDING),
                                    ('Position', pymongo.ASCENDING)])
    count = 0
    for stat in stats:
        stat['id'] = count
        count += 1
        print 'Inserting player:', stat['Name']
        player_collection.insert(stat)
    tweets = read_json('./data/player_sentiment.json')
    tweets_collection = db['tweets']
    tweets_collection.ensure_index([('Name', pymongo.ASCENDING)])
    happiness = defaultdict(int)
    sadness = defaultdict(int)
    for tweet in tweets:
        if tweet['sentiment'] == 'positive':
            happiness[(tweet['Name'], tweet['Team'])] += 1
        else:
            sadness[(tweet['Name'], tweet['Team'])] += 1
        tweets_collection.insert(tweet)
    for player in happiness:
        name, team = player
        if name == "Chris Clemons" or name == "Chris Harris" or name == "Kyle Williams" or name == "Mike Adams":
            if name == "Chris Harris" and team == "NFL Free Agents":
                team = "Jacksonville Jaguars"
            player_collection.update({"Name": name, "Team": team}, {'$set': {"num_pos": happiness[player]}})
        else:
            player_collection.update({"Name": name}, {'$set': {"num_pos": happiness[player]}})
    for player in sadness:
        name, team = player
        if name == "Chris Clemons" or name == "Chris Harris" or name == "Kyle Williams" or name == "Mike Adams":
            if name == "Chris Harris" and team == "NFL Free Agents":
                team = "Jacksonville Jaguars"
            player_collection.update({"Name": name, "Team": team}, {'$set': {"num_neg": sadness[player]}})
        else:
            player_collection.update({"Name": name}, {'$set': {"num_neg": sadness[player]}})
def main():
    """
    Clustering! Takes an argument from the command line naming the reviewer
    collection you want to cluster; your choices are pretty much 'reviewer' or
    'obannons_reviews'. Clustering then takes place and the results are stored
    in the database in either 'reviewer_clusters' or 'obannons_reviews_cluster',
    with each cluster holding a list of the users in that cluster.
    """
    db = utils.connect_db('Two_Pick_Too_Drunk')
    reviewer_collection = db[sys.argv[1]]
    reviews = reviewer_collection.find()
    cluster = ClusterAnalyer()
    cluster.cluster(reviews, db, sys.argv[1])
def get_Recommendations(self):
    start_time = time.time()
    print "starting"
    user = self.get_current_user()
    db = utils.connect_db('Two_Pick_Too_Drunk')
    reviews = 'reviewer'
    clusters = 'reviewer_cluster'
    recommenderer = Recommender()
    (results, result_set) = recommenderer.recommender(user.Beers_Rated, reviews, clusters, db)
    end_time = time.time()
    print 'done with updation after %.3f seconds' % (end_time - start_time)
def post(self):
    nick = self.get_argument("email", "")
    password = self.get_argument("password", "")
    db = utils.connect_db('Two_Pick_Too_Drunk')
    p_hash = sha256_crypt.encrypt(password)
    u = User.search(nick_l=nick.lower()).first()
    print u
    if u:
        self.set_current_user(u)
        self.redirect("/account/home")
    else:
        error_msg = tornado.escape.url_escape("Wrong information")
        self.redirect("/account/login?error=" + error_msg)
def main():
    db, coll = connect_db()
    print(coll)
    # Remove existing documents
    coll.remove({})
    for id in gutenberg.fileids():
        print(id, len(list(gutenberg.words(id))), 'words')
        result = coll.insert_one({
            'name': id.split('.')[0],
            'content': list(gutenberg.words(id))
        })
    print('Collection has {} documents'.format(coll.count()))
def updatemultitrack_api():
    """
    API for updating a multitrack.
    Updates values in the multitracks table.

    Returns
    -------
    """
    title = request.args.get('title')
    artist_name = request.args.get('artist_name')
    start_time = request.args.get('start_time')
    end_time = request.args.get('end_time')
    genre = request.args.get('genre')
    num_instruments = request.args.get('num_instruments')
    db_connection = connect_db(APP)
    # code to UPDATE row
    ticket_number = request.args.get('ticket_number')
    multitrack_id = request.args.get('multitrack_id')
    if title != "":
        db_connection.execute('update multitracks set title = "{}" where multitrack_id = "{}"'.format(title, multitrack_id))
    if artist_name != "":
        db_connection.execute('update multitracks set artist = "{}" where multitrack_id = "{}"'.format(artist_name, multitrack_id))
    if start_time != "":
        db_connection.execute('update multitracks set start_time = "{}" where multitrack_id = "{}"'.format(start_time, multitrack_id))
    if end_time != "":
        db_connection.execute('update multitracks set end_time = "{}" where multitrack_id = "{}"'.format(end_time, multitrack_id))
    if genre != "null":
        db_connection.execute('update multitracks set genre = "{}" where multitrack_id = "{}"'.format(genre, multitrack_id))
    if num_instruments != "":
        db_connection.execute('update multitracks set number_of_instruments = "{}" where multitrack_id = "{}"'.format(num_instruments, multitrack_id))
    db_connection.commit()
    return jsonify(
        multitrack_id=multitrack_id,
        ticket_number=ticket_number,
        title=title,
        artist_name=artist_name,
        start_time=start_time,
        end_time=end_time,
        genre=genre,
        num_instruments=num_instruments
    )
def multitrack():
    """
    Views more information about a single multitrack within a ticket, given a
    multitrack ID. Gives info on the status of the multitrack.

    Return
    ------
    'multitrack.html': rendered template
    multitrack_id: int
        requested by the user
    multitrack_status_headers: list
        headers of the multitrack status table
    formatted_multitrack_status_headers: list
        formatted headers of the multitrack status table
    multitrack_status: dictionary
        contents of the multitrack status table
    """
    ticket_number = request.args.get('ticket_number')
    multitrack_id = request.args.get('multitrack_id')
    db_connection = connect_db(APP)
    multitrack_status_headers = get_header(APP, 'multitracks')
    formatted_multitrack_status_headers = [
        format_headers(h) for h in multitrack_status_headers
    ]
    multitrack_status_cursor = db_connection.execute(
        'select * from multitracks where multitrack_id="{}"'.format(
            multitrack_id
        )
    )
    multitrack_status = fill_table(
        multitrack_status_headers, multitrack_status_cursor
    )
    update_multitrack_url = "/multitrack_update?multitrack_id={}&ticket_number={}".format(str(multitrack_id), ticket_number)
    ticket_url = "/ticket?ticket_number={}".format(ticket_number)
    return render_template(
        'multitrack.html',
        multitrack_id=multitrack_id,
        ticket_number=ticket_number,
        multitrack_status_headers=multitrack_status_headers,
        formatted_multitrack_status_headers=formatted_multitrack_status_headers,
        multitrack_status=multitrack_status,
        update_multitrack_url=update_multitrack_url,
        ticket_url=ticket_url
    )
def generate_recs_from_model(meta_path, tfidf_path, model_path):
    print("Generating Recommendations...")
    sqldb = connect_db(db_path)
    records = query_db(
        sqldb,
        '''select feedurl, author, id, title, content, flags
           from rss_item order by pubDate DESC LIMIT 200;'''
    )
    content_list = []
    outcome_list = []
    id_list = []
    title_list = []
    for record in records:
        # We should not judge the book by its cover
        content_list.append('||' + record['feedurl'] + '|| \n ||' + record['author'] +
                            '|| \n ||' + record['title'] + '|| \n' + record['content'])
        outcome_list.append(
            (record['flags'] is not None and 'r' not in record['flags'] and 's' in record['flags']) * 1)
        id_list.append(record['id'])
        # Yes, we are judging the book by its cover, but we are using the cool NLP model to judge
        title_list.append(record['title'])
    print("Total %d feed items found" % (len(content_list)))
    print(content_list[0])
    # Loading the pickle files
    meta = pickle.load(open(meta_path, 'rb'))
    out = pickle.load(open(tfidf_path, 'rb'))
    model = pickle.load(open(model_path, 'rb'))
    v = out['v']
    print("Projecting them to a mathematical space..")
    X_tfidf = v.transform(content_list)
    X_smart = cool_nlp_model.encode(title_list)
    clf = model['clf']
    beclf = model['beclf']
    y = out['y']
    X_tfidf = X_tfidf.todense().astype(np.float32)
    y = np.array(y).astype(np.float32)
    print("Recommending...")
    s_tfidf = clf.decision_function(X_tfidf)
    s_smart = beclf.decision_function(X_smart)
    s = s_smart * 0.65 + s_tfidf * 0.35
    sortix = np.argsort(-s)
    recs = sortix[y[sortix] == 0]
    recs = recs[:max_recommendations]
    print(recs)
    print([id_list[x] for x in recs])
    return [id_list[x] for x in recs]
def setCentroids(self, k):
    print 'Initializing Centroids'
    print str(k) + ' Clusters will be created'
    x = 0
    CentroidsList = []
    db = utils.connect_db('Two_Pick_Too_Drunk')
    bc = db['beer']
    beers = list()
    for y in bc.find():
        beers.append(y['BeerId'])
    while x < k:
        centroid = {}
        for token in beers:
            centroid[token] = random.random()
        CentroidsList.append(centroid)
        x += 1
    return CentroidsList
def sortDBs():
    db = utils.connect_db('nfl', remove_existing=False)
    player_collection = db['players']
    rankedDB = db['DB']
    rankedDB.drop()
    for each in player_collection.find({"$or": [{"Position": 'FS'}, {"Position": 'SS'}, {"Position": 'CB'},
                                                {"Position": 'DB'}, {"Position": 'SAF'}]}):
        if "Defensive" in each:
            defenseSolo = int(each['Defensive']['Total'])
            defenseAst = int(each['Defensive']['Ast'])
            defenseSack = int(each['Defensive']['Sck'])
            defensePDef = int(each['Defensive']['PDef'])
            defenseInt = int(each['Defensive']['Int'])
            defenseTD = int(each['Defensive']['TDs'])
            defenseYds = int(each['Defensive']['Yds'])
            defenseTotal = (defenseSolo * 100) + (defenseAst * 50) + (defenseSack * 200) + \
                           (defensePDef * 200) + (defenseInt * 500) + (defenseTD * 200) + (defenseYds * 5)
        else:
            defenseTotal = 0
        if "Fumbles" in each:
            ForcedFumble = int(each['Fumbles']['FF'])
            FumbleTD = int(each['Fumbles']['TD'])
            fumbleTotal = (ForcedFumble * 500) + (FumbleTD * 200)
        else:
            fumbleTotal = 0
        if "Kick Return" in each:
            kreturnTD = int(each['Kick Return']['TD'])
            kreturnYds = int(str(each['Kick Return']['Yds']).replace(",", ""))
            kreturnFum = int(each['Kick Return']['FUM'])
            kreturnTotal = (kreturnTD * 600) + (kreturnYds * 1) - (kreturnFum * 350)
        else:
            kreturnTotal = 0
        if "Punt Return" in each:
            preturnTD = int(each['Punt Return']['TD'])
            preturnYds = int(str(each['Punt Return']['RetY']).replace(",", ""))
            preturnFum = int(each['Punt Return']['FUM'])
            preturnTotal = (preturnTD * 600) + (preturnYds * 2) - (preturnFum * 350)
        else:
            preturnTotal = 0
        totalScore = defenseTotal + kreturnTotal + preturnTotal + fumbleTotal
        each['Score'] = totalScore
        rankedDB.insert(each)
    for each in rankedDB.find().sort('Score', -1):
        print each['Name']
        print each['Score']
def update_conversation(self, end=False):
    db = connect_db()
    params = {'access_token': self.access_token}
    url = 'https://graph.facebook.com/v2.5/%s' % self.con_id
    r = requests.get(url, params=params)
    data = r.json()
    try:
        message_type = self.responses[str(self.response)]['message_type']
    except KeyError:
        response = self.response
    else:
        response = self.response + 1
        self.user[message_type] = self.last_message
    self.response = response
    print response, self.user, data['updated_time']
    db.conversations.update({'con_id': self.con_id},
                            {'con_id': self.con_id,
                             'updated_time': data['updated_time'],
                             'response': response,
                             'user': self.user})
def generate_tfidf_pickles():
    """Gets all the read articles, treats articles flagged 's' as 1 and the rest
    as 0, and produces the embeddings."""
    sqldb = connect_db(db_path)
    records = query_db(sqldb, '''select feedurl, author, id, title, content, flags
                                 from rss_item where unread=0 order by pubDate DESC;''')
    content_list = []
    outcome_list = []
    id_list = []
    title_list = []
    for record in records:
        # We should not judge the book by its cover
        content_list.append('||' + record['feedurl'] + '|| \n ||' + record['author'] +
                            '|| \n ||' + record['title'] + '|| \n' + record['content'])
        outcome_list.append((record['flags'] is not None and 'r' not in record['flags'] and 's' in record['flags']) * 1)
        id_list.append(record['id'])
        # Yes, we are judging the book by its cover, but we are using the cool NLP model to judge
        title_list.append(record['title'])
    print("Total %d feed items found" % (len(content_list)))
    print(content_list[0])
    # compute tfidf vectors with scikits
    v = TfidfVectorizer(input='content', encoding='utf-8', decode_error='replace',
                        strip_accents='unicode', lowercase=True, analyzer='word',
                        stop_words='english',
                        token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
                        ngram_range=(1, 2), max_features=max_features,
                        norm='l2', use_idf=True, smooth_idf=True,
                        sublinear_tf=True, max_df=1.0, min_df=1)
    v.fit(content_list)
    print("Projecting them to a mathematical space..")
    X_tfidf = v.transform(content_list)
    X_smart = cool_nlp_model.encode(title_list)
    out = {}
    out['X_tfidf'] = X_tfidf
    out['X_smart'] = X_smart
    out['y'] = outcome_list
    out['v'] = v
    # print("writing", tfidf_path)
    safe_pickle_dump(out, tfidf_path)
    out = {}
    out['vocab'] = v.vocabulary_
    out['idf'] = v._tfidf.idf_
    out['ids'] = id_list
    out['idtoi'] = {x: i for i, x in enumerate(id_list)}
    # print("Writing Meta Data")
    safe_pickle_dump(out, meta_path)
def deleteticket_api():
    """
    Deletes a ticket from the tickets table.

    Returns
    -------
    Refreshes viewtickets.html
    """
    ticket_number = request.args.get('ticket_number')
    db_connection = connect_db(APP)
    db_connection.execute('delete from tickets where ticket_number={}'.format(ticket_number))
    db_connection.execute('delete from ticket_history where ticket_number={}'.format(ticket_number))
    db_connection.execute('delete from multitracks where ticket_number={}'.format(ticket_number))
    db_connection.commit()
    return redirect(url_for("view_tickets"))
def sortRBs():
    db = utils.connect_db('nfl', remove_existing=False)
    player_collection = db['players']
    rankedRB = db['RB']
    rankedRB.drop()
    for each in player_collection.find({"Position": 'RB'}):
        if "Rushing" in each:
            rushingTD = each['Rushing']['TD']
            rushingFUM = each['Rushing']['FUM']
            rushingYds = each['Rushing']['Yds']
            rushingTotal = (rushingTD * 700) + (rushingYds * 8) - (rushingFUM * 500)
        else:
            rushingTotal = 0
        if "Receiving" in each:
            receivingTD = int(each['Receiving']['TD'])
            receivingYds = int(str(each['Receiving']['Yds']).replace(",", ""))
            receivingFum = int(each['Receiving']['FUM'])
            receptions = int(each['Receiving']['Rec'])
            receivingTotal = (receivingTD * 650) + (receivingYds * 6) - (receivingFum * 300) + (receptions * 75)
        else:
            receivingTotal = 0
        if "Kick Return" in each:
            kreturnTD = int(each['Kick Return']['TD'])
            kreturnYds = int(str(each['Kick Return']['Yds']).replace(",", ""))
            kreturnFum = int(each['Kick Return']['FUM'])
            kreturnTotal = (kreturnTD * 600) + (kreturnYds * 1) - (kreturnFum * 350)
        else:
            kreturnTotal = 0
        if "Punt Return" in each:
            preturnTD = int(each['Punt Return']['TD'])
            preturnYds = int(str(each['Punt Return']['RetY']).replace(",", ""))
            preturnFum = int(each['Punt Return']['FUM'])
            preturnTotal = (preturnTD * 600) + (preturnYds * 2) - (preturnFum * 350)
        else:
            preturnTotal = 0
        totalScore = rushingTotal + receivingTotal + kreturnTotal + preturnTotal
        each['Score'] = totalScore
        rankedRB.insert(each)
    for each in rankedRB.find().sort('Score', -1):
        print each['Name']
        print each['Score']
def main():
    db = utils.connect_db('Two_Pick_Too_Drunk')
    beer_collection = db['beer']
    beer_collection.remove()
    beer_rating = BeerAverage()
    print 'Loading Beers'
    beers = utils.read_beers()
    brewery = []
    beersCount = 0
    for beer in beers:
        beersCount += 1
        if beer['Brewery'] not in brewery:
            brewery.append(beer['Brewery'])
        if beer['BeerId'] in beer_rating:
            doc = {'Brewery': beer['Brewery'],
                   'BeerId': beer['BeerId'],
                   'Name': beer['Name'],
                   'BreweryId': beer['BreweryId'],
                   'AverageRating': beer_rating[beer['BeerId']]}
        else:
            doc = {'Brewery': beer['Brewery'],
                   'BeerId': beer['BeerId'],
                   'Name': beer['Name'],
                   'BreweryId': beer['BreweryId']}
        beer_collection.insert(doc)
    print str(beersCount) + ' Beers over ' + str(len(brewery)) + ' Breweries'
    search = Search()
    beers = beer_collection.find()
    search.index_beers(beers)
    print 'inputting into DB'
    for beer in search.beers:
        beerData = search.beers[beer]
        beer_collection.update({"BeerId": beerData['BeerId']},
                               {"$set": {"tokens": beerData["tokens"], "vect": beerData["vect"]}})
    index = {}
    for x in search.index:
        index[x] = list(search.index[x])
    db['search'].remove({"SearchIndex": "Beer"})
    db['search'].insert({"SearchIndex": "Beer", "Index": index, "termIdf": search.term_idf})
def __init__(self):
    db = utils.connect_db('Two_Pick_Too_Drunk')
    collection = db['user']
    last_update_start = datetime(2012, 12, 6)
    reviews = 'reviewer'
    clusters = 'reviewer_cluster'
    recommenderer = Recommender()
    while 1:
        users = collection.find({"last_beer_update": {"$gte": last_update_start}})
        start_time = time.time()
        for user in users:
            (results, result_set) = recommenderer.recommender(user["Beers_Rated"], reviews, clusters, db)
            collection.update({"_id": user["_id"]}, {"$set": {"Recommendations": results}})
        end_time = time.time()
        print 'done with updation after %.3f seconds' % (end_time - start_time)
        last_update_start = datetime.now()
        time.sleep(10)
def get_goals_odds():
    # Check both request.json and request.args,
    # depending on how the function is called (e.g. browser GET vs curl POST)
    params = flask.request.json
    if params is None:
        params = flask.request.args
    if params:
        team_home = params.get("team_home")
        team_away = params.get("team_away")
        if team_home and team_away:
            db_ = utils.connect_db(remote=True)
            result = plotting.plot_odds_goals(db_, team_home, team_away)
            if result:
                return flask.render_template('goals_timeseries.html')
            return "Match %s - %s not found!" % (team_home, team_away)
    return "Match %s - %s not found!" % (team_home, team_away)
def job():
    try:
        print('%s started running the job...' % datetime.now())
        short_cut_file = cf.get('saplogon', 'short_cut_file')
        popup_win_title = cf.get('saplogon', 'popup_win_title')
        pin = cf.get('saplogon', 'pin')
        wait_sec = cf.get('saplogon', 'wait_sec')
        logon_sap(short_cut_file, popup_win_title, pin, wait_sec)
        time.sleep(2)
        j = 5
        while j > 0:
            session = start_sap()
            if session:
                break
            else:
                j -= 1
                time.sleep(1)
        if session:
            ip = cf.get('db', 'ip')
            db = cf.get('db', 'db')
            conn, cursor = connect_db(ip, db)
            tcode = cf.get('transaction', 'tcode')
            company = cf.get('transaction', 'company')
            sort_variant = cf.get('transaction', 'sort_variant')
            total_records_updated = execute_transaction(
                session, tcode, company, sort_variant, conn, cursor)
            close_sap(session)
            send_email('%s Asset updated' % total_records_updated, "*****@*****.**")
        else:
            send_email("Failed logon SAP", '*****@*****.**')
        print('%s finished running the job...' % datetime.now())
    except Exception, e:
        send_email("Asset interface run with error %s" % str(e), '*****@*****.**')
        raise
def job():
    try:
        print('%s started running the job...' % datetime.now())
        short_cut_file = cf.get('saplogon', 'short_cut_file')
        popup_win_title = cf.get('saplogon', 'popup_win_title')
        pin = cf.get('saplogon', 'pin')
        wait_sec = cf.get('saplogon', 'wait_sec')
        logon_sap(short_cut_file, popup_win_title, pin, wait_sec)
        time.sleep(2)
        j = 30
        while j > 0:
            session = start_sap()
            if session:
                break
            else:
                j -= 1
                time.sleep(1)
        if session:
            conn, cursor = connect_db(cf.get('db', 'ip'), cf.get('db', 'db'),
                                      cf.get('db', 'uid'), cf.get('db', 'pwd'))
            order_lines = get_from_db(cursor)
            for i in order_lines:
                print(i)
            # order_lines = [['22232516', '10', '11255594', '']]
            execute_transaction(session, cursor, order_lines)
            close_db(conn)
            close_sap(session)
            send_email('CO11N processed %s records' % len(order_lines), "*****@*****.**")
        else:
            send_email("Failed logon SAP", '*****@*****.**')
        print('%s finished running the job...' % datetime.now())
    except Exception, e:
        send_email("order confirmation run with error %s" % str(e), '*****@*****.**')
        raise
def get_conversations(access_token):
    db = connect_db()
    params = {'access_token': access_token}
    page_id = '102940933437533'
    url = 'https://graph.facebook.com/v2.5/%s/conversations' % page_id
    r = requests.get(url, params=params)
    data = r.json()['data']
    for convo in data:
        id = str(convo['id'])
        conversation = db.conversations.find_one({'con_id': id})
        if conversation is None:
            print access_token
            conversation = Conversation(id, 0, None, access_token, {})
            db.conversations.insert({
                'con_id': id,
                'updated_time': convo['updated_time'],
                'response': 0,
                'user': {}
            })
            conversation.respond()
        elif convo['updated_time'] != conversation['updated_time']:
            conversation = Conversation(id, conversation['response'], None, access_token, conversation['user'])
            conversation.respond()
import graphene
from graphene_mongo import MongoengineObjectType
from models import Vulnerability, Target
from models import Project as Proj
from models import ThreatModel, Test, Repo, RepoTestCase
from models import UseCase, AbuseCase, VulnerabilityEvidence, Scan, Interaction
from mongoengine import DoesNotExist
from graphene.relay import Node
from utils import connect_db, _validate_jwt

connect_db()


class Vuln(MongoengineObjectType):
    class Meta:
        model = Vulnerability


class Project(MongoengineObjectType):
    class Meta:
        model = Proj


class TCase(MongoengineObjectType):
    class Meta:
        model = Test


class TModel(MongoengineObjectType):
    class Meta:
        model = ThreatModel
def __init__(self):
    self.sql = connect_db()
    self.s = requests.Session()
    self.result = []
def setUp(self):
    # this method runs before each test
    db = utils.connect_db('msl_unittest', remove_existing=True)
    ranker = MockPageRanker()
    self.ts = tweetsearch.TweetSearch(db, ranker)
    self.ts.index_tweets(iter(tiny_corpus))
import os

import pandas as pd

import utils

BASE_DIR = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
DATA_DIR = os.path.join(BASE_DIR, 'data')
SQL_DIR = os.path.join(BASE_DIR, 'src', 'sql')

conn = utils.connect_db()
query = utils.import_query(os.path.join(SQL_DIR, 'lifetime.sql'))
df = pd.read_sql_query(query, conn)
df.to_csv(os.path.join(DATA_DIR, 'lifetime.csv'), sep=';', index=False)
def get_product_list():
    utils.connect_db()
    print('List of all products')  # translated from the original Chinese: '所有产品列表'
from flask import Flask, g
from views.home import home
from views.admin import admin
from views.api import api
from flask_debugtoolbar import DebugToolbarExtension
import utils
import config
from datetime import date, datetime

app = Flask(__name__)

# load configs
app.config.from_object('config')
# toolbar = DebugToolbarExtension(app)

# connect to database
utils.connect_db(app)

# config flask mail
utils.config_mail(app)

app.register_blueprint(home)
app.register_blueprint(admin)
app.register_blueprint(api)
import json
import logging
import re
import traceback

from redis import Redis
from requests import Session

from utils import connect_db

if __name__ == "__main__":
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
    r = Redis()
    s = Session()
    db = connect_db()
    PATTERN = re.compile("portraitCallBack\((.+)\)")
    while True:
        qq = r.blpop("nick_queries")[1].decode("utf-8")
        try:
            ret = s.get(
                "http://r.pengyou.com/fcg-bin/cgi_get_portrait.fcg?uins={}".format(qq),
                timeout=7)
            ret.encoding = "gbk"
            o = json.loads(PATTERN.fullmatch(ret.text).group(1))
            for item in o[qq]:
                if isinstance(item, str) and not item.startswith("http"):
                    r.set("nick.{}".format(qq), item)
            db.messages.update_one({
                "author": qq,
            # Common Template Variables
            "wwwroot": config.wwwroot,
            "tree": config.trees[0],
            "trees": config.trees,
            "generated_date": config.generated_date,
            "config": config.template_parameters,
            # Error template Variables
            "error": "Tree '%s' is not a valid tree." % tree
        }
        template = "error.html"
    else:
        # Parse the search query
        qtext = querystring.get("q", "").decode('utf-8')
        q = query.Query(qtext)
        # Connect to database
        conn = utils.connect_db(tree)
        # Arguments for the template
        arguments = {
            # Common Template Variables
            "wwwroot": config.wwwroot,
            "tree": tree,
            "trees": config.trees,
            "config": config.template_parameters,
            "generated_date": config.generated_date
        }
        if conn:
            result = None
            if can_redirect:
                result = query.direct_result(conn, q)
            if result:
                path, line = result
@app.route('/error')
def error():
    return "Error!"


@app.errorhandler(404)
def page_not_found(e):
    try:
        url = request.url
    except Exception:
        url = ""
    return render_template('404.html', url=url), 404


app.jinja_env.globals['csrf_field'] = csrf_field

with open(join(dirname(__file__), "../config.json")) as f:
    config = load(f)

redis = Redis()
db.init(connect_db(config), redis)
db.ensure_indexes()

app.secret_key = config["secret_key"]
app.session_cookie_name = "SESSION_ID"
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
app.session_interface = RedisSessionManager(redis)

if __name__ == "__main__":
    app.run("0.0.0.0", 10000, debug=True)
def set_db():
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = connect_db(app.config['DATABASE'])
#             (df.bookie == "William Hill"),
#             (df.bookie == "Marathon Bet"),
#             (df.bookie == "Betfair Sportsbook"),
#             (df.bookie == "SunBets"),
#             (df.bookie == "Paddy Power"),
#             (df.bookie == "Unibet"),
#             (df.bookie == "Coral"),
#             (df.bookie == "Betfred"),
#             (df.bookie == "Bet Victor")]
#
#     # Plot
#     fig, ax = plt.subplots(2, 1, figsize=(15, 10))
#     fig.fig_size = (22, 10)
#     for filter_, bookie in zip(filters, bookies_to_plot):
#         ax[0].plot(df[filter_].datetime, df[filter_]['over2_5'], 'o-', label=bookie)
#         ax[0].set_title("Over 2.5")
#         ax[1].plot(df[filter_].datetime, df[filter_]['under2_5'], 'o-', label=bookie)
#         ax[1].set_title("Under 2.5")
#     ax[0].legend(loc='lower left')
#     ax[1].legend(loc='lower left')
#     fig.suptitle("%s | %s - %s | %s" % (str(m['match_datetime']), m['team_home'], m['team_away'], m['result']['score']), fontsize=16)
#     plt.show()
#     print("Last odds collected at: %s" % df[filter_].datetime.iloc[-1])

if __name__ == "__main__":
    db = utils.connect_db(remote=True)
    # plot_odds_goals_bokeh(db, "Atalanta", "Inter Milan")
    m = db.matches.find_one({'team_home': "Atalanta", 'team_away': "Inter Milan"})
    res = utils.create_odds_df(m)
    print(res.head(5))
        tweets=tweets,
        author=settings['author'],
        agree_to_honor_code=settings['agree_to_honor_code'],
        count=len(tweets),
        time=end_time - start_time,
    )


@bottle.route('/')
def index():
    return bottle.static_file('index.html', root='static')


@bottle.route('/favicon.ico')
def favicon():
    return bottle.static_file('favicon.ico', root='static')


@bottle.route('/static/<filename:path>')
def server_static(filename):
    return bottle.static_file(filename, root='static')


if __name__ == "__main__":
    db = utils.connect_db('msl', True)
    _searcher = tweetsearch.TweetSearch(db)
    _searcher.index_tweets(utils.read_tweets())
    bottle.run(host=settings['http_host'], port=settings['http_port'], reloader=True)
def create_db():
    db = connect_db(app.config['DATABASE'])
    with open(os.path.join(os.path.dirname(__file__), 'schema.sql'), mode='r') as f:
        db.cursor().executescript(f.read())
    db.commit()
passages = []
ner = []

# Get list of NAS taxa terms
species_list = get_species_list()
# species_list = ['Oncorhynchus', 'Acipenser']  # testing

# Database connection
# documents = connect_db()

# For each taxa term, process sentences for each document:
for term in species_list:
    print('working on {}'.format(term))
    term_doc_list = get_doc_list(term)
    # term_doc_list = ['558']  # for local test
    documents = connect_db(term_doc_list)
    for doc in documents:
        for i in doc.itertuples():
            # Get surrounding sentences for scope
            surround_sents = n_sents(i[2], doc['docid'])
            try:
                before_sent = doc.iloc[surround_sents[0]]
                middle_sent = doc.iloc[surround_sents[1]]
                after_sent = doc.iloc[surround_sents[2]]
            except IndexError:
                # couldn't get surrounding sentences
                continue
            # Sample passage
            passage = before_sent['words'] + middle_sent['words'] + after_sent['words']
            full_ners = before_sent['ners'] + middle_sent['ners'] + after_sent[
import utils

# --------------------------- DB data initialization --------------------------
conn, cursor = utils.connect_db()

# 1. db cleanup
query = f"""
TRUNCATE amazon_frontend_orderproducttuple, amazon_frontend_warehousestock,
         amazon_frontend_order, amazon_frontend_product CASCADE;
DELETE FROM amazon_frontend_warehouse;
"""
utils.execute_and_commit(query, conn, cursor)

# 2. create products
query = f"""
INSERT INTO amazon_frontend_product(name, description, price, seller)
VALUES ('productA', 'A', 1.00, 'Brian'),
       ('productB', 'B', 1.00, 'Brian'),
       ('productC', 'C', 1.00, 'Brian'),
       ('productD', 'D', 1.99, 'Drew');
"""
utils.execute_and_commit(query, conn, cursor)

# 3. create warehouses
query = f"""