def load_artwork(session):
    f2 = unicode_csv_reader(open("artwork_data.csv"), delimiter=",")
    f2.next()  # skip the header row
    for row in f2:
        artwork = model.Artwork()
        artwork.artworkId = int(row[0])
        artwork.artistRole = row[3]
        # skip known-bad artist ids
        if int(row[4]) not in (19232, 5265, 3462, 12951):
            artwork.artistId = int(row[4])
        artwork.title = row[5]
        artwork.dateText = row[6]
        artwork.medium = row[7]
        if row[9].isdigit():
            artwork.year = row[9]
        artwork.dimensions = row[11]
        if row[12].isdigit():
            artwork.width = row[12]
        if row[13].isdigit():
            artwork.height = row[13]
        if row[15].isdigit():
            artwork.units = row[15]
        artwork.inscription = row[16]
        artwork.thumbnailCopyright = row[17]
        artwork.thumbnailURL = row[18]
        artwork.url = row[19]
        session.add(artwork)
    session.commit()
def join_carpool(trip, user):
    """Creates a new TripPassenger object; returns an appropriate message."""
    carpool = db_session.query(model.Trip).filter_by(id=trip).first()
    driver = get_user_by_id(carpool.trip_driver)
    if user.id == carpool.trip_driver:
        return "You cannot join a carpool where you are the driver."
    if carpool.seats_available > 0:
        carpool_contact = driver.first_name + ", email: " + driver.email
        new_passenger = model.TripPassenger(trip_id=trip, passenger_id=user.id)
        seats_available = carpool.seats_available - 1
        db_session.query(model.Trip).filter_by(id=carpool.id).update(
            {"seats_available": seats_available})
        db_session.add(new_passenger)
        db_session.commit()
        # Sends an email to the driver confirming the new passenger.
        message = (
            user.first_name + " " + user.last_name + " (email: " + user.email +
            ") has joined your carpool to " + carpool.event_name + " on " +
            carpool.departure_date + "."
        )
        send_email("inStep carpool addition", driver.email, "*****@*****.**", message)
        response = "You have been added to the carpool. The driver is " + carpool_contact
        return response
    else:
        return "That carpool is full."
def execute(self):
    if not self.task:
        self.no_active_tasks()
        return
    self.task.add_tomato(self.is_whole)
    session.commit()
    self.task.show_progress()
def delete_item():
    user = db_session.query(User).get(g.user_id)
    item = db_session.query(Item).get(request.args.get("id"))
    db_session.delete(item)
    db_session.commit()
    flash("item deleted", "info")
    return redirect(url_for('manage_items'))
def delete_tutorial(id):
    if not g.user_id:
        return redirect(url_for("index"))
    tutorial = db_session.query(Tutorial).get(id)
    db_session.delete(tutorial)
    db_session.commit()
    return redirect(url_for("list_tutorials"))
def edi_book(self):
    a = self.liste.currentItem().text()
    b = session.query(Livre).filter_by(titre=a).first()
    el = diabook.Diabook()
    el.aut_line.setText("%s" % unicode(b.auteur))
    el.tit_line.setText("%s" % unicode(b.titre))
    el.gen_line.setText("%s" % unicode(b.genre))
    el.com_line.setText("%s" % unicode(b.commentaire))
    el.exec_()
    auteur = el.aut_line.text()
    titre = el.tit_line.text()
    genre = el.gen_line.text()
    commentaire = el.com_line.toPlainText()
    b.auteur = auteur
    b.titre = titre
    b.genre = genre
    b.commentaire = commentaire
    session.commit()
    item = self.liste.takeItem(self.liste.currentRow())
    item.setText(titre)
    self.liste.addItem(item)
def close_trade(id):
    trade = Trade.query.filter_by(id=id).one()
    trade.close_date = datetime.datetime.utcnow()
    db_session.add(trade)
    db_session.commit()
    flash("Your trade has been marked as complete.", "success")
    return redirect("/trade_history")
def login(provider_name):
    response = make_response()
    result = authomatic.login(WerkzeugAdapter(request, response), provider_name)
    if result:
        # If we've received a user from Facebook...
        if result.user:
            # Get the user's profile data and look for it in our database
            result.user.update()
            facebook_id = result.user.id
            user = dbsession.query(User).filter_by(facebook_id=facebook_id).first()
            # If we don't find the user in our database, add it!
            if not user:
                user = User(facebook_id=facebook_id, email=result.user.email,
                            name=result.user.name)
                dbsession.add(user)
                dbsession.commit()
            # Store the user in our session, logging them in
            login_user(user)
            # Redirect somewhere after login. In this case, the homepage
            return redirect('/')
    return response
def load_users(session):
    with open("seed_data/u.user", "rb") as user_file:
        reader = csv.reader(user_file, delimiter="|")
        for row in reader:
            user = User(id=row[0], age=row[1], zipcode=row[4])
            session.add(user)
    session.commit()
def load_ratings(session):
    with open("seed_data/u.data", "rb") as ratings_file:
        reader = csv.reader(ratings_file, delimiter="\t")
        for row in reader:
            rating = Rating(user_id=row[0], movie_id=row[1], rating=row[2])
            session.add(rating)
    session.commit()
def execute(self):
    t = Task(self.name)
    if self.should_be_active:
        t.activate()
    session.add(t)
    session.commit()
    print "Added task %d." % t.id
def commit(self):
    self.commit_collection(self.batters, Batter)
    self.commit_collection(self.pitchers, Pitcher)
    self.commit_collection(self.teams, Team)
    session.add_all(self.events)
    session.add_all(self.games)
    session.commit()
def sign_up_form():
    ## insert the new user's input into the database
    email = request.form.get("email")
    password = request.form.get("password")
    username = request.form.get("username")
    first_name = request.form.get("first_name")
    last_name = request.form.get("last_name")
    gender = int(request.form.get("gender"))
    age = int(request.form.get("age"))
    zipcode = request.form.get("zipcode")
    hashed_password = hash_password(password, email)

    # create an instance of User with email, password, username, etc. as attributes
    user = User(email=email, password=hashed_password, username=username,
                first_name=first_name, last_name=last_name, gender=gender,
                age=age, zipcode=zipcode)

    # check for the email in the db; if it's not there, add the user
    if dbsession.query(User).filter_by(email=email).first():
        flash("This email address is already in use. Please try again.")
        return redirect("/sign_up")
    else:
        dbsession.add(user)
        dbsession.commit()
        created_user = dbsession.query(User).filter_by(email=email).first()
        session["login"] = created_user.id
        session["user"] = created_user
        return redirect("/pick_genres")
def create_tables():
    Base.metadata.create_all(engine)
    u = User(email='*****@*****.**', username='******')
    u.set_password('unicorn')
    session.add(u)
    u2 = User(email='*****@*****.**', username='******')
    u2.set_password('unicorn')
    session.add(u2)
    b = Book(
        title='The Book of Steph',
        amazon_url='www.smstroud.com',
        owner_id=1
    )
    session.add(b)
    b2 = Book(
        title='Stroud\'s Story',
        amazon_url='www.smstroud.com',
        owner_id=1,
        current_borrower=2
    )
    # datetime.now must be called, not passed as a function object
    b_h = BorrowHistory(book_id=2, borrower_id=2, date_borrowed=datetime.now())
    # p = Post(title='test post', body='body of a test post.')
    # u.posts.append(p)
    session.add(b2)
    b2.borrow_history.append(b_h)
    session.commit()
def cadastrar(self):
    # always builds the dictionary from this function's inputs
    self.nome = raw_input("Enter the server's hostname: ")
    self.descricao = raw_input("Enter a description for the server: ")
    # self.ip = raw_input("Enter an IP for the server: ")
    try:
        ssh = SSH()
        docker = Docker()
        ssh.executa_comando(docker.criar(self.nome))
        container = ssh.executa_comando(docker.pegar_ip(self.nome))
        container = json.loads(container)
        self.ip = container[0].get("NetworkSettings").get("IPAddress")
        s = ServidorModel(self)
        # s.nome = servidor.get("nome")
        # s.descricao = servidor.get("descricao")
        # s.ip = servidor.get("ip")
        session.add(s)
        session.commit()
        print "Server registered successfully!"
    except Exception as e:
        session.rollback()
        print "Failed to register server: ", e
def input_match_terms():
    """
    This allows the user to enter and edit the match percent and max salary
    percent match of their 401k match.
    """
    if g.logged_in is True:
        # If the user selects that they do not have a 401k match, skip
        # all 401k match-related questions.
        match_401k = m_session.query(model.User).filter_by(
            id=g.user.id).first().company_match
        if match_401k == "Yes":
            if g.inputs is True:
                match_percent = m_session.query(model.User).filter_by(
                    id=g.user.id).first().match_percent
                match_salary = m_session.query(model.User).filter_by(
                    id=g.user.id).first().match_salary
            else:
                match_percent = match_salary = 0
            return render_template(
                "input_match_terms.html",
                match_percent=match_percent,
                match_salary=match_salary)
        else:
            match_percent = match_salary = 0
            update_user = m_session.query(model.User).filter_by(
                id=g.user.id).update({model.User.match_percent: match_percent,
                                      model.User.match_salary: match_salary})
            m_session.commit()
            return redirect("/input/risk_tolerance")
    else:
        return redirect("/login")
def save_assets():
    """
    Pulls assets from user input (as a post request), saves them to the
    database, and routes to the next question (/results will perform the
    calculations).
    """
    form = AssetsForm(request.form)
    if form.validate_on_submit():
        assets = float(request.form["assets"])
        # Ensure the user's assets are updated each time they change their
        # input, rather than added to the database as a new row.
        user_assets = m_session.query(model.UserBanking).filter_by(
            user_id=g.user.id).first()
        if user_assets is not None:
            update_assets = m_session.query(model.UserBanking).filter_by(
                user_id=g.user.id).update(
                {model.UserBanking.inputted_assets: assets})
        else:
            new_account = model.UserBanking(
                user_id=g.user.id, inputted_assets=assets, checking_amt=0,
                savings_amt=0, IRA_amt=0, comp401k_amt=0, investment_amt=0)
            m_session.add(new_account)
        m_session.commit()
        return redirect("/input/income")
    else:
        flash("Please enter an integer. No commas or symbols.")
        return redirect("/input/assets")
def load_globalcounts(list_of_wordcounts):
    """
    Adds wordcounts for all unique words. There should only be one row per
    unique word.
    """
    for localcount_dict in list_of_wordcounts:
        for word, count in localcount_dict.iteritems():
            item = session.query(GlobalCount).filter(GlobalCount.term == word).first()
            if item:
                print "%r is already in globalcounts. Updating count..." % word
                # Update the global count for this word, because we have added
                # new songs with more occurrences of this word.
                q = session.query(LocalCount.term, func.sum(LocalCount.count))
                q = q.group_by(LocalCount.term)
                q = q.filter(LocalCount.term == word)
                results = q.all()
                item.count = results[0][1]
                print "Updating %r's count to %d" % (item.term, item.count)
                session.commit()
            else:
                print "%r not in globalcounts table, creating new row" % word
                q = session.query(LocalCount.term, func.sum(LocalCount.count))
                q = q.group_by(LocalCount.term)
                q = q.filter(LocalCount.term == word)
                results = q.all()
                new_count = results[0][1]
                new_row = GlobalCount(term=word, count=new_count)
                session.add(new_row)
                # you must commit before you query the same word/item again!
                session.commit()
def __get_calculator(self, matrix):
    calculator = Calculator()
    calculator.algorithm_name = matrix.algorithm
    calculator.filter_name = matrix.matrix_filter
    session.add(calculator)
    session.commit()
    return calculator
def sync_photo(id, flickr, check_dirty=False):
    print id
    db_photo = session.query(Photo).filter(Photo.flickr_id == id).first()
    if db_photo and not check_dirty:
        print 'Photo is already local.'
        return db_photo
    photo = simplejson.loads(flickr.photos_getInfo(photo_id=id, nojsoncallback=1))
    p = photo['photo']
    (id, title) = (int(p['id']), p['title']['_content'])
    url = url_for_photo(p)
    page_url = p['urls']['url'][0]['_content']
    description = """%s\n
%s

Taken: %s in %s
Flickr: %s""" % (p['title']['_content'], p['description']['_content'],
                 p['dates']['taken'], loc_to_string(p), page_url)
    if db_photo:
        print "Photo %s already exists" % id
        if db_photo.title == title and db_photo.description == description:
            return db_photo
        db_photo.dirty = True
        db_photo.title = title
        db_photo.description = description
    else:
        db_photo = Photo(title=title, description=description, flickr_id=id,
                         dirty=False, url=url)
        if not p['visibility']['ispublic']:
            db_photo.private = True
        session.add(db_photo)
    sync_tags(db_photo, p)
    session.commit()
    return db_photo
def update_page(page, chapter):
    print "Calling %s" % page.page_link
    response = urllib2.urlopen(page.page_link)
    if not (200 <= response.code < 300):
        raise Exception("Could not retrieve the page for link: %s" % page.page_link)
    print "Response %s" % response.code
    content = response.read()
    (next_link, image) = get_image_and_next_link(content, page.page_link)
    while next_link is not None:
        if image is None:
            raise Exception("No image found for the given page")
        page.image_link = image
        next_page = Page(next_link, chapter)
        session.add(next_page)
        session.commit()
        print "Added Page[%d] %s" % (next_page.id, next_page.page_link)
        page.next_page_id = next_page.id
        session.add(page)
        session.commit()
        print "Updated page %d with image %s" % (page.id, page.image_link)
        page = next_page
        response = urllib2.urlopen(page.page_link)
        if not (200 <= response.code < 300):
            raise Exception("Could not retrieve the page for link: %s" % page.page_link)
        content = response.read()
        (next_link, image) = get_image_and_next_link(content, page.page_link)
def load_rss():
    # Query the db for the current stories; remember the last id so the old
    # stories can be deleted after the new ones are loaded.
    exstories = db_session.query(Stories).all()
    last_id = exstories[-1].id if exstories else 0
    sources = {"NPR News": 'http://www.npr.org/rss/rss.php?id=1001',
               "BBC": 'http://feeds.bbci.co.uk/news/rss.xml'}
    for source in sources:
        print source
        # use feedparser to grab & parse the rss feed
        parsed = feedparser.parse(sources[source])
        print "parsed"
        # go through each entry in the RSS feed to pull out elements for Stories
        for i in range(len(parsed.entries)):
            title = parsed.entries[i].title
            url = parsed.entries[i].link
            # pull the abstract; strip out extra markup that is sometimes included
            abstract = (parsed.entries[i].description.split('<'))[0]
            print abstract
            # build the Stories row (was db_session.Stories, which would fail)
            story = Stories(title=title, url=url, abstract=abstract, source=source)
            # add the story to the db
            db_session.add(story)
            print "added story to db"
            # commit
            db_session.commit()
            print "committed"
    # delete the old stories from the db
    for l in range(1, last_id + 1):
        db_session.query(Stories).filter_by(id=l).delete()
    db_session.commit()
def register_user():
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        confirm_password = request.form['confirm_password']
        age = request.form['age']
        gender = request.form['gender']
        job = request.form['job']
        zipcode = request.form['zipcode']
        if password != confirm_password:
            flash("Your passwords do not match. Please re-type all your information.")
            return redirect("/sign_up")
        existing = db_session.query(User).filter_by(email=email).first()
        if existing:
            flash("Email is already in use.", "error")
            return redirect(url_for("display_search"))
        # create a new user object
        user = User(email=email, password=password, age=age, gender=gender,
                    job=job, zipcode=zipcode)
        db_session.add(user)
        db_session.commit()
        db_session.refresh(user)
        session['user_id'] = user.id  # save a cookie to the browser
        return redirect(url_for("display_search"))
    return redirect(url_for("login"))
def index_new_book_info(book_info):
    '''
    This function updates a dictionary containing all tokens for a book.
    New search terms are saved to the SearchTerm table. The key is the
    token; the value is a list of document IDs that contain the token.
    '''
    book_info_ids_by_token = {}
    tokens = get_tokens_from_book_info(book_info)
    for token in tokens:
        if token not in book_info_ids_by_token:
            book_info_ids_by_token[token] = []
        book_info_ids_by_token[token].append(book_info.id)
    for token, book_ids in book_info_ids_by_token.items():
        # TODO: check the DB first before creating a new search term
        search_term = SearchTerm(
            token=token,
            num_results=len(book_ids),
            # creates a json string from the book_ids array
            document_ids=json.dumps(book_ids),
        )
        session.add(search_term)
    session.commit()
    return book_info_ids_by_token
def recreate_index():
    '''
    This function indexes the book_info table of the database. I'm
    implementing tf-idf functionality, so I save the number of documents
    in which the term shows up, and I also save a record of the specific
    documents that contain the term.
    '''
    book_infos = BookInfo.query.all()
    freq_by_id_by_token = defaultdict(Counter)
    for info in book_infos:
        tokens = get_tokens_from_book_info(info)
        for token in tokens:
            freq_by_id_by_token[token][info.id] += 1
    # delete all search terms before recreating the index
    SearchTerm.query.delete()
    for token, frequency_by_id in freq_by_id_by_token.items():
        search_term = SearchTerm(
            token=token,
            num_results=len(frequency_by_id),
            # creates a json string from the `frequency_by_id` dict
            document_ids=json.dumps(frequency_by_id),
        )
        session.add(search_term)
    session.commit()
def add_page_pair_to_database(from_page, to_page, limit):
    with db_lock:
        cou = session.query(Page.id).filter(Page.url == from_page).scalar()
        cou1 = session.query(Page.id).filter(Page.url == to_page).scalar()
        if cou is None:
            new_page_from = Page(url=from_page, text="", rank=0)
            session.add(new_page_from)
            session.flush()
            id0 = new_page_from.id
        else:
            id0 = cou
        if cou1 is None:
            allowed = limit < 1 or limit > session.query(Page).count()
            if not allowed:
                return
            new_page_to = Page(url=to_page, text="", rank=0)
            session.add(new_page_to)
            session.flush()
            id1 = new_page_to.id
        else:
            id1 = cou1
        new_relation = Relation(page_id=id0, destination_id=id1)
        # print(new_relation.page_id.id)
        session.add(new_relation)
        session.commit()
def create_secondary_facebook_album(set, facebook):
    title = "%s (#%s)" % (set.title, len(set.fb_albums) + 2)
    print "Created %s" % title
    data = facebook.photos.createAlbum(name=title,
                                       description=set.description,
                                       visible="everyone")
    set.fb_albums.append(FBAlbum(facebook_id=int(data['aid'])))
    session.commit()
    return int(data['aid'])
def input_match_401k():
    """
    This allows the user to enter and edit whether their company has a
    401k match.
    """
    if g.logged_in is True:
        # If the user selects that they do not have a company 401k, skip
        # all 401k-related questions.
        comp_401k = m_session.query(model.User).filter_by(
            id=g.user.id).first().company_401k
        if comp_401k == "Yes":
            if g.inputs is True:
                match_401k = m_session.query(model.User).filter_by(
                    id=g.user.id).first().company_match
            else:
                match_401k = 0
            return render_template(
                "input_match_401k.html", match_401k=match_401k)
        else:
            match_401k = "No"
            match_percent = match_salary = 0
            update_user = m_session.query(model.User).filter_by(
                id=g.user.id).update({model.User.company_match: match_401k,
                                      model.User.match_percent: match_percent,
                                      model.User.match_salary: match_salary})
            m_session.commit()
            return redirect("/input/risk_tolerance")
    else:
        return redirect("/login")
def load_songs(lyrics_data):
    """
    Add songs to the songs table.
    """
    # go through each song dictionary and extract data
    for song_dictionary in lyrics_data:
        # check whether the song already exists in the database
        if session.query(Song).filter(Song.url == song_dictionary['url']).first():
            print "%r is already in the database!" % song_dictionary['songname']
        else:
            # let's turn this song... into a Song!
            # make a new row in the songs table
            url = song_dictionary['url']
            artist = song_dictionary['artist']
            songname = song_dictionary['songname']
            new_song = Song(url=url, artist=artist, songname=songname)
            session.add(new_song)
            print "SUCCESS! %r is such a jam." % new_song.songname
    session.commit()
def load_localcounts(lyrics_data, list_of_wordcounts):
    """
    Adds local wordcounts for each song.
    """
    for song_dictionary in lyrics_data:
        url = song_dictionary['url']
        # put on your counting shoes
        lyrics = song_dictionary['lyrics']
        unique_words = {}
        for line in lyrics:
            line = line.lower()
            words = re.findall('\w+', line)
            # unique words for each song
            for word in words:
                if unique_words.get(word):
                    unique_words[word] += 1
                else:
                    unique_words[word] = 1
        # make all the localcount rows for that song
        for word, localcount in unique_words.iteritems():
            new_row = LocalCount(song_id=url, term=word, count=localcount)
            print "Adding %r with count of %r" % (new_row.term, new_row.count)
            session.add(new_row)
        session.commit()
        list_of_wordcounts.append(unique_words)
    return list_of_wordcounts
def import_test_scores():
    for file_path in filter(lambda x: x.startswith('profile_test_score'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"test score {idx}")
            c = TestScore()
            c.id = uuid.uuid4().hex
            c.created = row['created']
            c.updated = row['updated']
            c.profile_id = row['profile_id']
            c.name = row['name']
            c.score = row['score']
            # was a stray chained assignment (c.date_on = row['date_on'] = ...)
            c.date_on = row['date_on'] if not pd.isnull(row['date_on']) else None
            c.description = row['description']
            session.add(c)
        session.commit()
def editCatalog(catalog_id):
    """ A route to edit a specific catalog """
    editCatalog = session.query(Catalog).filter_by(id=catalog_id).one_or_none()
    if editCatalog is None:
        flash("The catalog you are looking for does not exist.")
        return redirect(url_for('catalog.showAllCatalogs'))
    if editCatalog.user_id != login_session['user_id']:
        flash("You are not authorized to edit.")
        return redirect(url_for('catalog.showAllCatalogs'))
    if editCatalog != [] and request.method == 'POST':
        editCatalog.name = request.form['editCatalogName']
        session.add(editCatalog)
        session.commit()
        flash(editCatalog.name + " is edited!")
        return redirect(url_for('catalog.showAllCatalogs'))
    elif editCatalog != [] and request.method == 'GET':
        return render_template('catalogs_edit.html', catalog=editCatalog)
def import_honour_awards():
    for file_path in filter(lambda x: x.startswith('profile_honour_award'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"honour award {idx}")
            c = HonourAward()
            c.id = uuid.uuid4().hex
            c.created = row['created']
            c.updated = row['updated']
            c.profile_id = row['profile_id']
            c.title = row['title']
            c.issuer = row['issuer']
            c.issued_on = row['issued_on'] if not pd.isnull(row['issued_on']) else None
            c.description = row['description']
            session.add(c)
        session.commit()
def deleteCatalog(catalog_id):
    """ A route to delete a specific catalog """
    deleteCatalog = session.query(Catalog).filter_by(
        id=catalog_id).one_or_none()
    if deleteCatalog is None:
        flash("The catalog you are looking for does not exist.")
        return redirect(url_for('catalog.showAllCatalogs'))
    if deleteCatalog.user_id != login_session['user_id']:
        flash("You are not authorized to delete.")
        return redirect(url_for('catalog.showAllCatalogs'))
    if deleteCatalog != [] and request.method == 'POST':
        session.delete(deleteCatalog)
        session.commit()
        flash(deleteCatalog.name + " is deleted!")
        return redirect(url_for('catalog.showAllCatalogs'))
    elif deleteCatalog != [] and request.method == 'GET':
        return render_template('catalogs_delete.html', catalog=deleteCatalog)
def average_score_calculation(store_id):
    try:
        store = session.query(Store).filter(Store.id == store_id).first()
        store_reviews = session.query(StoreReview).filter(
            StoreReview.store_id == store_id).all()
        # guard against division by zero when a store has no reviews yet
        if not store_reviews:
            return
        average_score = 0
        for review in store_reviews:
            average_score += review.score
        average_score = average_score // len(store_reviews)
        store.average_score = average_score
        session.commit()
    except SQLAlchemyError:
        return abort(500, "database error")
def import_profiles():
    for file_path in filter(lambda x: x.startswith('profile-'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(idx)
            p = Profile()
            p.id = row['id']
            p.created = row['created']
            p.updated = row['updated']
            p.city = row['city']
            p.first_name = row['first_name']
            p.last_name = row['last_name']
            p.headline = row['headline']
            p.state = row['state']
            p.summary = row['summary']
            session.add(p)
        session.commit()
def save_tweet():
    tweets = get_tweet()
    for tweet in tweets:
        tweet_id = tweet["tweet_id"]
        # stop once an already-saved tweet is seen (assumes newest-first order)
        if session.query(Tweet).filter(Tweet.tweet_id == tweet_id).first():
            break
        tw = Tweet()
        tw.tweet_id = tweet["tweet_id"]
        tw.twitter_id = tweet["twitter_id"]
        tw.twitter_name = tweet["twitter_name"]
        tw.tweeted_at = tweet["datetime"]
        tw.rank_tier = tweet["rank_tier"]
        tw.player_name = tweet["main"]["player_name"]
        tw.character = tweet["main"]["character"]
        tw.rank = tweet["main"]["rank"]
        tw.vsid = tweet["main"]["vsid"]
        tw.comment = tweet["main"]["comment"]
        session.add(tw)
    session.commit()
def save_income():
    """
    Pulls income from user input (as a post request), saves it to the
    database, and routes to the next question (/results will perform the
    calculations).
    """
    form = IncomeForm(request.form)
    if form.validate_on_submit():
        income = float(request.form["income"])
        # Find the user id using f_session and then update the database with
        # the user's financial inputs
        update_user = m_session.query(model.User).filter_by(
            id=g.user.id).update({model.User.income: income})
        m_session.commit()
        return redirect("/input/comp_401k")
    else:
        flash("Please enter an integer. No commas or symbols.")
        return redirect("/input/income")
def import_publications():
    for file_path in filter(lambda x: x.startswith('profile_publication'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"publication {idx}")
            c = Publication()
            c.id = uuid.uuid4().hex
            c.created = row['created']
            c.updated = row['updated']
            c.profile_id = row['profile_id']
            c.name = row['name']
            c.publisher = row['publisher']
            c.published_on = row['published_on'] if not pd.isnull(
                row['published_on']) else None
            c.description = row['description']
            c.url = row['url']
            session.add(c)
        session.commit()
def add_reason():
    """Add the user and desired language to dbsession and redirect to profile.html."""
    reason = request.form.get("reason")
    session['reason'] = reason
    usr = User(name=session["name"], email=session["email"],
               country_code=session["country_code"],
               mother_tongue_code=session["mother_tongue_code"],
               reason=session['reason'], age='', sex='', occupation='',
               current_city='', current_country='', origin_city='',
               origin_country='', about_txt='', profile_url='')
    usr.set_password(session['password'])
    dbsession.add(usr)
    dbsession.commit()
    lang = dbsession.query(Language).filter_by(
        language_name=session.get('language')).first()
    lang_desired = Language_desired(user_id=usr.id,
                                    language_code=lang.language_code,
                                    level=session['level'])
    dbsession.add(lang_desired)
    dbsession.commit()
    # clear the session to get rid of superfluous info
    session.clear()
    # add info back to the session
    session["login"] = usr.name
    session["mother_tongue"] = usr.language.language_name
    return redirect("/profile")
def import_orgs():
    for file_path in filter(lambda x: x.startswith('profile_organization'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"org {idx}")
            c = Organization()
            c.id = uuid.uuid4().hex
            c.created = row['created']
            c.updated = row['updated']
            c.profile_id = row['profile_id']
            c.starts_at = row['starts_at'] if not pd.isnull(row['starts_at']) else None
            c.ends_at = row['ends_at'] if not pd.isnull(row['ends_at']) else None
            c.name = row['name']
            c.title = row['title']
            c.description = row['description']
            session.add(c)
        session.commit()
def get_kv_id(self, key, value):
    if key in self.cache:
        if value in self.cache[key]:
            return self.cache[key][value]
    else:
        # warm the cache for this key from the database
        self.cache[key] = {}
        for key_value in KeyValue.where(key=key).all():
            self.cache[key][key_value.value] = str(key_value.id)
        if value in self.cache[key]:
            return self.cache[key][value]
    if not value:
        return 0
    key_value_id = KeyValue.where(key=key, value=value).value(KeyValue.id)
    if not key_value_id:
        KeyValue.create(key=key, value=value)
        session.commit()
        return self.get_kv_id(key, value)
    self.cache[key][value] = key_value_id
    return key_value_id
def fetch_profile(user_url):
    response = requests.request("GET", user_url, headers=headers)
    soup = BeautifulSoup(response.text, features="lxml")
    try:
        info_str = soup.select(
            "div[class='u'] table td span[class='ctt']")[0].get_text()
        # uid was never defined here; parse it from the avatar link as in fetch_info
        uid = avatar_re_pattern.findall(
            soup.select("div[class='u'] table td a")[0]['href'])[0]
        name, location = profile_re_pattern.findall(info_str)[0]
        logging.info("name: {}, location: {}".format(name, location))
        try:
            user = User(uid, name, location)
            ele = FollowQueue(uid)
            session.add(user)
            session.add(ele)
            session.query(InfoQueue).filter(InfoQueue.uid == uid).delete()
            logging.info("info dequeued {}".format(uid))
            session.commit()
        except IntegrityError:
            session.rollback()
            logging.info("repeat primary key")
    except IndexError:
        logging.info("Index out of range")
def saveToSQLite(self, **kwargs):
    if kwargs['id'] is None:
        print 'Program error, please check'
        return False
    # check whether the record already exists
    law = session.query(Law).filter(Law.id == kwargs['id']).first()
    if law is not None:
        print 'Record already exists, no need to save it again'
    else:
        law = Law(id=kwargs['id'],
                  title=kwargs['title'],
                  url=kwargs['url'],
                  wenhao=kwargs['wenhao'],
                  publish_date=kwargs['publish_date'],
                  done_date=kwargs['done_date'],
                  publish_department=kwargs['publish_department'],
                  law_class=kwargs['law_class'],
                  zhengwen=kwargs['zhengwen'])
        session.add(law)
        session.commit()
def update_series_rating():
    value = request.form.get("value")
    series_id = request.form.get("series_id")
    user_id = request.form.get("user_id")
    count = DB.query(model.Rating).filter_by(user_id=user_id,
                                             series_id=series_id).count()
    user_series_count = DB.query(UserSeries).filter_by(
        user_id=user_id, series_id=series_id).count()
    if count == 0:
        # a user can only rate a series they've added to one of their watched lists
        if user_series_count != 0:
            new_rating = model.Rating(series_id=series_id, user_id=user_id,
                                      value=value)
            DB.add(new_rating)
            DB.commit()
    else:
        rating = DB.query(model.Rating).filter_by(user_id=user_id,
                                                  series_id=series_id).one()
        rating.value = value
        DB.add(rating)
        DB.commit()
    return "successfully updated rating!"
def auth_session(request: Request):
    token = oauth.fetch_token(
        'https://accounts.google.com/o/oauth2/token',
        authorization_response=str(request.url),
        # Google-specific extra parameter used for client authentication
        client_secret=client_secret)
    header = jwt.get_unverified_header(token['id_token'])
    cert_str = requests.get('https://www.googleapis.com/oauth2/v1/certs').json()[
        header['kid']].encode()
    cert_obj = load_pem_x509_certificate(cert_str)
    pub_key = cert_obj.public_key()
    payload = jwt.decode(token['id_token'], pub_key, algorithms=['RS256'],
                         audience=client_id)
    if payload['iat'] - 60 < time.time() < payload['exp']:
        if payload['email_verified']:
            response = RedirectResponse('/shop/goods/#')
            id_ = payload['sub']
            user = session.query(Member).filter(Member.id == id_).first()
            if not user:
                session.add(Member(id=id_, email=payload['email']))
                session.commit()
            response.set_cookie('kw_id', id_, max_age=token['expires_in'])
            return response
        else:
            raise jwt.PyJWTError('email_verified must be true')
    else:
        raise jwt.PyJWTError(
            f'this token is invalid at present {payload["iat"]} < {time.time()} < {payload["exp"]}'
        )
def fetch_info(user_url):
    uid = None
    response = requests.request("GET", user_url, headers=headers)
    soup = BeautifulSoup(response.text, features="lxml")
    try:
        info_str = soup.select(
            "div[class='u'] table td span[class='ctt']")[0].get_text()
        uid = avatar_re_pattern.findall(
            soup.select("div[class='u'] table td a")[0]['href'])[0]
        name, location = profile_re_pattern.findall(info_str)[0]
        logging.info("uid: {}, name: {}, location: {}".format(uid, name, location))
        try:
            user = User(uid, name, location)
            queue_follow(uid)
            # ele = FollowQueue(uid)
            session.add(user)
            # session.add(ele)
            session.commit()
        except IntegrityError:
            session.rollback()
            logging.info("repeat primary key")
    except IndexError:
        logging.error("Index out of range")
        t = 0
        with open('log/info_consumer/index_error_times.log', 'r') as f:
            t = int(f.read()) + 1
        if t > 4:
            first_info_obj = session.query(InfoQueue).order_by(
                InfoQueue.create_time).first()
            session.delete(first_info_obj)
            session.commit()
            logging.info('removed first obj in info queue since it keeps failing')
        with open('log/info_consumer/index_error_times.log', 'w') as f:
            f.write(str(t))
        exit(-1)
    return uid
def handle_audit_request(audit_id):
    uid, err = get_oemid(request=request)
    if err is not None:
        return jsonify(UNAUTH_RESULT)
    ctx.current_user = DccaUser.get_by_id(uid)
    check_json(request)
    if not ctx.current_user.admin:
        return jsonify({
            'status': 'fail',
            'message': 'Not admin, cannot access'
        })
    action = get_json(request).get('action')
    audit = DccaSolutionAudit.get(audit_id)
    if action == 'accept':
        approved = True
        if audit.to_public:
            audit.solution.is_public = True
        else:
            audit.solution.is_public = False
    else:
        approved = False
    audit.approved = approved
    audit.status = True
    try:
        session.commit()
    except Exception as e:
        err_msg = "Fail to handle the audit request. {}".format(str(e))
        raise DCCAException(err_msg)
    return jsonify({
        'status': 'success',
        'message': 'Handle the audit request successfully'
    })
def start_crawler(self):
    start = time.time()
    # read robots.txt
    tmp = "http://" + self.base + "/robots.txt"
    self.robot_parser.set_url(tmp)
    self.robot_parser.read()
    # put the first link
    self.q.put((0, self.website))
    new_page = Page(url=self.website, text="", rank=0)
    session.add(new_page)
    session.commit()
    threads = []
    for x in range(self.threads_number):
        t = threading.Thread(target=self.worker)
        t.daemon = True
        threads.append(t)
        t.start()
    # wait until the queue becomes empty
    self.q.join()
    # join the threads
    for i in range(self.threads_number):
        self.q.put(None)
    for t in threads:
        t.join()
    session.commit()
    # empty the queue
    self.q.queue.clear()
    end = time.time()
    print("With", self.threads_number, "threads elapsed :", end - start)
    print("Total number of pages processed :", self.current_pages_processed)
def import_education():
    for file_path in filter(lambda x: x.startswith('profile_education'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"education {idx}")
            c = Education()
            c.id = str(uuid.uuid4())
            c.created = row['created']
            c.updated = row['updated']
            c.starts_at = row['starts_at'] if not pd.isnull(row['starts_at']) else None
            c.ends_at = row['ends_at'] if not pd.isnull(row['ends_at']) else None
            c.field_of_study = row['field_of_study']
            c.degree_name = row['degree_name']
            c.school = row['school']
            c.school_profile_url = row['school_profile_url']
            c.profile_id = row['profile_id']
            session.add(c)
        session.commit()
def dbadd_user_rating(user, rating, movie):
    """Add or update a movie rating while logged in."""
    user = dbsesh.query(model.User).filter_by(email=user).first()
    movie = dbsesh.query(model.Movie).filter_by(name=movie).first()
    r = model.Rating()
    r.user_id = user.id
    r.rating = rating
    r.movie_id = movie.id
    dbsesh.add(r)
    dbsesh.commit()
    return r
def remove_tasks_by_ids():
    uid, err = get_oemid(request=request)
    if err is not None:
        return jsonify(UNAUTH_RESULT)
    ctx.current_user = DccaUser.get_by_id(uid)
    task_ids_str = request.args.get('task_ids')
    empty_check(task_ids_str, error_message='The "task_ids" cannot be empty.')
    task_ids = task_ids_str.split(',')
    tasks = EsTask.query_in(task_ids)
    if len(tasks) < len(task_ids):
        return jsonify({
            'status': 'fail',
            'message': 'Not exist or cannot access',
            'unauthorized': list(set(task_ids) - set([t.id for t in tasks]))
        })
    try:
        data = []
        for task in tasks:
            task.logical_delete = True
            task.ended_at = datetime.utcnow()
            session.add(task)
            data.append(task.as_dict(schema=TaskSchema))
        session.commit()
    except Exception:
        raise DCCAException('Fail to remove task')
    return jsonify({
        'status': 'success',
        'message': 'Success to remove tasks',
        'tasks': data
    })
def add_to_user_series_table():
    user_id = int(request.form.get("user_id"))
    series_id = int(request.form.get("series_id"))
    state = request.form.get("state")
    new_user_series = model.UserSeries(user_id=user_id, series_id=series_id,
                                       state=state)
    count = DB.query(UserSeries).filter_by(series_id=series_id,
                                           user_id=user_id).count()
    if count == 0:
        DB.add(new_user_series)
        DB.commit()
        print "added new user series!"
    else:
        db_duplicate = DB.query(UserSeries).filter_by(series_id=series_id,
                                                      user_id=user_id).one()
        if db_duplicate.state != state:
            db_duplicate.state = state
            DB.add(db_duplicate)
            DB.commit()
            print "Changed to a new state!"
    return "success!"
def post(self):
    id = self.get_argument("id", None)
    title = tornado.escape.utf8(self.get_argument("title"))
    text = tornado.escape.utf8(self.get_argument("content"))
    html = markdown.markdown(text)
    clazz = tornado.escape.utf8(self.get_argument("clazz"))
    if id:
        article = session.query(Article).filter_by(id=id).first()
        if not article:
            raise tornado.web.HTTPError(404)
        article.title = title
        article.html = html
        article.text = text
        article.clazz = clazz
        session.add(article)
        session.commit()
        return self.redirect("/article/%s" % id)
    article = Article(title=title, text=text, html=html, clazz=clazz,
                      user_id=self.get_current_user().id)
    session.add(article)
    session.commit()
    return self.redirect("/article/%s" % article.id)
def dequeue_info():
    first_info_obj = session.query(InfoQueue).order_by(
        InfoQueue.create_time).first()
    if first_info_obj is not None:
        # uid = first_info_obj.uid
        user_url = first_info_obj.url
        logging.info("got first url in info queue {}".format(user_url))
        uid = fetch_info(user_url)
        logging.info("got info of {}".format(uid))
        relations_in_buffer = session.query(InfoQueue).filter(
            InfoQueue.url == user_url).all()
        for relation in relations_in_buffer:
            relation_obj = None
            if relation.follow_or_fan == FOLLOWEE:
                relation_obj = UserRelationship(uid, relation.source_uid)
                logging.info("build relationship between {} and {}".format(
                    uid, relation.source_uid))
            elif relation.follow_or_fan == FOLLOWER:
                relation_obj = UserRelationship(relation.source_uid, uid)
                logging.info("build relationship between {} and {}".format(
                    relation.source_uid, uid))
            try:
                session.add(relation_obj)
                session.delete(relation)
                logging.info("dequeue relationship between {} and {}".format(
                    relation.source_uid, uid))
                session.commit()
            except IntegrityError:
                session.rollback()
                logging.info("repeat primary key")
                session.delete(relation)
                logging.info("re-dequeue relationship between {} and {}".format(
                    relation.source_uid, uid))
                session.commit()
        with open('log/info_consumer/index_error_times.log', 'w') as f:
            f.write('0')
def display_client_file(client):
    choice = None
    while choice != 0:
        choice = view.display_client_file(client)
        if choice == 0:
            # user wants to return home
            return
        elif choice == -1:
            # user wants to add an account
            account_type = view.display_add_account()
            if account_type == 1:
                # debit account
                account = DebitAccount(client.client_id)
            else:
                # savings account
                account = SavingAccount(client.client_id, 0.03)
            session.add(account)
            session.commit()
        else:
            # user wants to see the (choice - 1)th account of the client
            index = choice - 1
            account = client.accounts[index]
            display_account(account)
def register():
    if request.method == 'GET':
        return render_template('register.html')
    else:
        username = request.form.get('username')
        user = model_session.query(model.User).filter_by(username=username).first()
        if user is not None:
            flash('This username is already taken.')
            return redirect(url_for('register'))
        else:
            password = request.form.get('password')
            verify_password = request.form.get('verify_password')
            if verify_password == password:
                new_user = model.User(username=username)
                new_user.set_password(password)
                model_session.add(new_user)
                model_session.commit()
                model_session.refresh(new_user)
                return redirect(url_for('login'))
            else:
                flash('Passwords do not match!')
                return redirect(url_for('register'))
def open_new_trade(user, currency):
    """Returns a new trade."""
    user = get_user(msg=user)
    affiliate = get_affiliate(user.chat)
    if affiliate is not None:
        affiliate = affiliate.id
    trade = Trade(
        id=generate_id(),
        seller=user.id,
        currency=currency,
        payment_status=False,
        created_at=str(datetime.now()),
        updated_at=str(datetime.now()),
        is_open=True,
        affiliate_id=affiliate,
    )
    session.add(trade)
    session.commit()
    # return the trade, as the docstring promises
    return trade
def import_experiences():
    for file_path in filter(lambda x: x.startswith('profile_experience'),
                            get_parquet_file_paths()):
        table = pq.read_table(file_path)
        df = table.to_pandas()
        for idx, row in df.iterrows():
            print(f"exp {idx}")
            c = Experience()
            c.id = uuid.uuid4().hex
            c.created = row['created']
            c.updated = row['updated']
            c.starts_at = row['starts_at'] if not pd.isnull(row['starts_at']) else None
            c.ends_at = row['ends_at'] if not pd.isnull(row['ends_at']) else None
            c.profile_id = row['profile_id']
            c.company = row['company']
            c.company_profile_url = row['company_profile_url']
            c.title = row['title']
            c.location = row['location']
            c.description = row['description']
            session.add(c)
        session.commit()