def main(): from_date = datetime.date(*map(int, sys.argv[1].split('-'))) to_date = datetime.date(*map(int, sys.argv[2].split('-'))) assert from_date <= to_date quote = decode_quote(sys.argv[3]) #print quote session = Session() lit_years = {} date = from_date while date <= to_date: found = False session.rollback() lit_date = get_lit_date(date, lit_years, session) masses = [] try: masses = lit_date.get_masses(strict=True) except SelectingMassException: pass for mass in masses: if found: break for reading in mass.readings: try: verses = decode_quote(reading.quote) except BadQuoteException: pass if quotes_intersect(quote, verses): print "%s: %s" % (date, reading.quote) found = True break date += datetime.timedelta(days=1)
def update_book(id):
    """Update a book's id/title and rebuild its author links.

    Expects a JSON body with 'id', 'title' and 'authors_id'.  Aborts with
    400 on a missing payload or unknown book; returns 400 on a duplicate id;
    returns an error string when an author id does not exist.
    """
    book = Session.query(Book).filter(Book.id == id)
    if not request.json or not book.first():
        abort(400)
    # Drop the old author links; they are recreated below.
    Session.query(AuthorBookLink).filter(
        AuthorBookLink.book_id == request.json['id']).delete()
    authors = request.json['authors_id']
    try:
        book.update({
            'id': request.json['id'],
            'title': request.json['title'],
            'updated_at': datetime.now()
        })
    except IntegrityError:
        Session.rollback()
        return 'Id currently exists', 400
    for author_id in authors:
        author = Session.query(Author).filter(Author.id == author_id)
        # BUG FIX: a Query object is always truthy, so the previous
        # `if not author:` check never fired; test the fetched row instead.
        if not author.first():
            Session.rollback()
            return 'No author with id %i' % author_id
        Session.add(
            AuthorBookLink(author_id=author_id, book_id=request.json['id']))
    Session.commit()
    return 'OK', 200
def update_author(id):
    """Rename/re-id an author while preserving links to their books."""
    query = Session.query(Author).filter(Author.id == id)
    if not request.json or not query.first():
        abort(400)
    link_query = Session.query(AuthorBookLink).filter(
        AuthorBookLink.author_id == query.first().id)
    # Remember which books were linked, then drop the stale links.
    book_ids = [link.book_id for link in link_query]
    link_query.delete()
    try:
        query.update({
            'id': request.json['id'],
            'name': request.json['name'],
            'updated_at': datetime.now()
        })
    except IntegrityError:
        Session.rollback()
        return 'Id already exists', 400
    # Recreate each link against the (possibly new) author id.
    for book_id in book_ids:
        new_link = AuthorBookLink(author_id=request.json['id'],
                                  book_id=book_id)
        Session.add(new_link)
    Session.commit()
    return 'OK', 200
def fit_process(uid):
    """Fit the classifier for *uid* in a worker process and persist the
    serialized model to the database.

    Uses a dedicated session for this process; rolls back and re-raises on
    any failure.
    """
    # Temporary file to hold the serialized model; we only need the path,
    # so the descriptor is closed immediately.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    # give this process a dedicated session
    session = Session()
    try:
        ftclassifier.fit(session, uid, path)
        # sgdclassifier.fit(session, uid, path)
        # Read the fitted model back and persist it to the database.
        with open(path, 'rb') as f:
            classifier = f.read()
        dal.update_classifier(session, uid, classifier)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
        # BUG FIX: the unlink previously ran after the try/finally, so the
        # temp file leaked whenever fit/persist raised; delete it here so it
        # is removed on every path.
        os.unlink(path)
def populate():
    """Populates the database for the given user with sample data."""
    try:
        token = request.form['id_token']
        uid = verify_id_token(token)
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        # Distinguish revoked tokens from otherwise invalid ones.
        if exc.code == 'ID_TOKEN_REVOKED':
            return "id_token revoked", status.HTTP_400_BAD_REQUEST
        return "id_token invalid", status.HTTP_400_BAD_REQUEST
    session = Session()
    try:
        dal.populate(session, uid)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
    # Refit the user's classifier so it reflects the new sample data.
    classifier.fit(uid)
    return "Sample data added for user", status.HTTP_202_ACCEPTED
def reset():
    """Deletes all of the user's data from the database."""
    try:
        uid = verify_id_token(request.form['id_token'])
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        # Revoked tokens get a distinct message; anything else is invalid.
        message = ("id_token revoked" if exc.code == 'ID_TOKEN_REVOKED'
                   else "id_token invalid")
        return message, status.HTTP_400_BAD_REQUEST
    session = Session()
    try:
        dal.delete(session, uid)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
    return "User data deleted", status.HTTP_202_ACCEPTED
def get_announcements(course, url): '''Gets all new announcements Returns a list of all new announcements. ''' session = Session() try: r = s.get('https://edux.pjwstk.edu.pl/Announcements.aspx', stream=True) r.raise_for_status() new_announcements = extract_announcements(r.content) # All pairs of (timestamp, message) are saved to db # if they arent there already for (timestamp, message) in new_announcements: announcement = session.query(Announcement). \ filter_by(course=course, created_at=timestamp, message=message). \ first() if announcement is None: # This is what we care about announcement = Announcement( course=course, created_at=timestamp, message=message) session.add(announcement) print u'New announcement at {0}'.format(timestamp) yield (timestamp, message) session.commit() except Exception: session.rollback() raise finally: session.close()
def stats():
    """Get a list of all the given user's stats."""
    try:
        uid = verify_id_token(request.form['id_token'])
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        if exc.code == 'ID_TOKEN_REVOKED':
            return "id_token revoked", status.HTTP_400_BAD_REQUEST
        return "id_token invalid", status.HTTP_400_BAD_REQUEST
    session = Session()
    try:
        user_stats = dal.get_stats(session, uid)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
    return json.dumps(user_stats), status.HTTP_200_OK
def parse_metadata(jobid):
    """Fill in name/extension/type metadata for the file attached to a job.

    Marks the job 'working' while processing, then 'complete' on success or
    'failed' on any error.
    """
    session = Session()
    file_obj, job = session.query(File, Job).filter(Job.id == jobid).first()
    job.status = 'working'
    session.commit()
    try:
        # Split "name.ext" out of the last path component.
        match = re.search(r'(.+)\.([a-z0-9]+)$',
                          file_obj.abs_path.split('/')[-1], re.I)
        filename = match.group(1)
        extension = match.group(2)
        file_type = magic.from_file(file_obj.abs_path)
        mime_type = magic.from_file(file_obj.abs_path, mime=True)
        file_obj.name = filename
        file_obj.ext = extension
        file_obj.file_type = file_type
        file_obj.mime_type = mime_type
        # BUG FIX: this was `job.staus = 'complete'` (typo), which silently
        # set a new attribute and left the status stuck at 'working' --
        # exactly the behaviour the old TODO comment was chasing.
        job.status = 'complete'
        print('session.dirty: {0}'.format(session.dirty))
        session.commit()
    except Exception:
        session.rollback()
        job.status = 'failed'
        session.commit()
def predict():
    """Predicts the text label of every value in the given list of
    unlabeled text."""
    try:
        uid = verify_id_token(request.form['id_token'])
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        if exc.code == 'ID_TOKEN_REVOKED':
            return "id_token revoked", status.HTTP_400_BAD_REQUEST
        return "id_token invalid", status.HTTP_400_BAD_REQUEST
    try:
        unlabeled = json.loads(request.form['unlabeled_text'])
    except KeyError:
        return "unlabeled_text required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "unlabeled_text unrecognized", status.HTTP_400_BAD_REQUEST
    session = Session()
    try:
        labels = classifier.predict(session, uid, list(unlabeled.values()))
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
    # Pair each input key back up with its predicted label.
    predictions = dict(zip(unlabeled.keys(), labels))
    return json.dumps(predictions), status.HTTP_200_OK
def main():
    """Print the text of the reading named by id on the command line,
    wrapped in double quotes and indented via PrependStream."""
    reading_id = int(sys.argv[1])
    session = Session()
    reading = session.query(Reading).filter(Reading.id == reading_id).one()
    content = reading.text
    PrependStream(sys.stdout, ' ').write('"' + content.strip() + '"')
    sys.stdout.write('\n')
    # Read-only script: discard the transaction rather than committing.
    session.rollback()
def save_cate(cate_name, cate_id):
    """Insert a new category row; roll back and re-raise on any failure."""
    try:
        row = Categories(category_name=cate_name, category_id=cate_id)
        Session.add(row)
        Session.commit()
    except Exception as e:
        Session.rollback()
        raise e
def save_album(album_name, album_id, album_cover):
    """Insert a new album row; roll back and re-raise on any failure."""
    try:
        row = Albums(album_name=album_name, album_id=album_id,
                     album_cover=album_cover)
        Session.add(row)
        Session.commit()
    except Exception as e:
        Session.rollback()
        raise e
def db_session() -> Iterator[Session]:
    """Yield a database session; commit on success, roll back on error.

    The commit happens inside the try block so that a failing commit is
    also rolled back; the session is closed on every path.
    """
    session = Session()
    try:
        yield session
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def save_wallpaper(title, file_name, image_url, id=None, category=None,
                   album_id=None, album_name=None, username=None):
    """Insert a wallpaper row; roll back and re-raise on any failure.

    NOTE(review): `file_name` is accepted but never stored -- confirm
    whether it should be persisted.
    """
    try:
        row = Wallpapers(title=title, image_url=image_url, id=id,
                         category_id=category, user_id=username,
                         album_id=album_id, album_name=album_name)
        Session.add(row)
        Session.commit()
    except Exception as e:
        Session.rollback()
        raise e
def check_db(loud=False, delete_orphans=False, fix=False):
    """Run consistency checks over the database.

    Any pending changes are discarded with a rollback at the end (the
    commit is deliberately left commented out).
    """
    session = Session()
    try:
        check_orphans(session, loud, delete_orphans)
        check_readings(session, loud, fix)
        check_masses(session, loud)
        check_events(session, loud)
        #session.commit()
        session.rollback()
    finally:
        # ROBUSTNESS: close even when a check raises (previously the
        # session leaked on error).
        session.close()
def get_db() -> Generator[SessionType, None, None]:  # pragma: no cover
    """Yield a database session for the duration of a request.

    On normal completion the transaction is committed; a failing commit is
    logged and rolled back.  A failure to create the session is logged and
    the generator finishes without yielding.

    NOTE(review): the session is never close()d here -- confirm that the
    Session factory (e.g. a scoped session) handles cleanup elsewhere.
    """
    db: Optional[SessionType] = None
    try:
        db = Session()  # type: ignore
        yield db
    except:  # pylint: disable=bare-except
        log.exception('Could not create db session!')
    else:
        try:
            db.commit()
        except:  # pylint: disable=bare-except
            log.exception('Could not commit transaction on end of request!')
            db.rollback()
def session_scope():
    """
    Context for dealing with sessions.

    Yields a fresh session so the caller need not manage its lifecycle:
    commits on success, rolls back and re-raises on failure, always closes.
    """
    db = Session()
    try:
        yield db
        db.commit()
    except:
        db.rollback()
        raise
    finally:
        db.close()
def load(file_):
    """Load CVE rows with bounty amounts from a csv file into the database.

    Each csv row is (cve id, product, bounty amount).  CVSS details are
    fetched from NVD; rows not found in NVD are skipped with a warning and
    duplicate ids are rolled back.
    """
    nvdxml = utilities.NvdXml()
    session = Session()
    reader = csv.reader(file_)
    next(reader, None)  # Ignoring the header
    for row in reader:
        debug(row)
        cve = Cve(id=row[0], year=utilities.get_year(row[0]), product=row[1])
        nvd_details = nvdxml.get_details(cve.id)
        if nvd_details:
            # Copy the CVSS vector fields out of the NVD details dict.
            cve.cvss = Cvss()
            cve.cvss.access_complexity = nvd_details['access-complexity']
            cve.cvss.access_vector = nvd_details['access-vector']
            cve.cvss.authentication = nvd_details['authentication']
            cve.cvss.availability_impact = nvd_details['availability-impact']
            cve.cvss.confidentiality_impact = nvd_details[
                'confidentiality-impact'
            ]
            cve.cvss.integrity_impact = nvd_details['integrity-impact']
            cve.cvss.score = nvd_details['score']
            cve.cvss.exploitability_subscore = nvd_details[
                'exploitability-subscore'
            ]
            cve.cvss.impact_subscore = nvd_details[
                'impact-subscore'
            ]
            # Bounty amount arrives as a currency string, e.g. "$1,234".
            cve.bounty = Bounty()
            cve.bounty.amount = float(row[2].replace('$', '').replace(',', ''))
            session.add(cve)
            # Commit per row so one duplicate does not discard the rest.
            try:
                session.commit()
            except sqlalchemy.exc.IntegrityError as e:
                error('{} is a duplicate.'.format(cve.id))
                session.rollback()
        else:
            warning('{} was not found in NVD.'.format(cve.id))
def update_daum_stock_news():
    """Crawl Daum stock news and store new rows in the DB.

    (Original docstring: "다음뉴스 크롤링하여 DB 저장" -- crawl Daum news and
    save to the DB.)  Failures are logged and reported to Slack.
    """
    # ROBUSTNESS: create the session before the try block so the except/
    # finally clauses can safely call rollback()/close() even when session
    # construction itself fails (previously `session` could be unbound).
    session = Session()
    try:
        start_time = time.time()
        last_id = session.query(StockNews.id).order_by(
            StockNews.date.desc(), StockNews.id.desc()).first()
        if last_id:
            last_id = last_id[0]
        logger.info('last_id: {}'.format(last_id))
        news_list = get_news_list(last_id=last_id, limit=100)
        affected_rows = 0
        for news in news_list:
            try:
                date = datetime.strptime(news.get('date'), '%y.%m.%d %H:%M')
                session.add(
                    StockNews(id=news.get('doc_id'),
                              offerer=news.get('offerer'),
                              title=news.get('title'),
                              date=date))
                # Commit per row so one duplicate does not discard the rest.
                session.commit()
                affected_rows += 1
            except Exception as e:
                logger.exception('duplicated: {}'.format(news))
                session.rollback()
        execution_time = time.time() - start_time
        logger.info('execution_time: {}'.format(execution_time))
        logger.info('{} rows added'.format(affected_rows))
    except Exception as e:
        logger.exception('update_daum_stock_news', exc_info=True)
        session.rollback()
        slack.send_message('BATCH:update_daum_stock_news fail {}'.format(e))
    finally:
        session.close()
def add_book():
    """Create a book from the JSON body and link it to its authors.

    Aborts with 400 on a missing payload or an already-used book id;
    returns an error string when an author id does not exist.
    """
    if not request.json or Session.query(Book).filter(
            Book.id == request.json['id']).first():
        abort(400)
    new_book = Book(id=request.json['id'],
                    title=request.json['title'],
                    created_at=datetime.now())
    Session.add(new_book)
    authors = request.json['authors_id']
    for author_id in authors:
        author = Session.query(Author).filter(Author.id == author_id)
        # BUG FIX: a Query object is always truthy, so the previous
        # `if not author:` check never fired; test the fetched row instead.
        if not author.first():
            Session.rollback()
            return 'No author with id %i' % author_id
        Session.add(
            AuthorBookLink(author_id=author_id, book_id=request.json['id']))
    Session.commit()
    return 'OK', 200
def main():
    """Interactively edit the text of one reading (id from argv[1]).

    Opens an editor pre-filled with the current text, the reading's quote
    and (when retrievable) the corresponding Bible text as '#' comments;
    commits the edited text only after user confirmation.
    """
    reading_id = int(sys.argv[1])
    session = Session()
    bible_query = BibleQuery()
    reading = session.query(Reading).filter(Reading.id == reading_id).one()
    text = reading.text if reading.text is not None else ""
    editor = Editor()
    # Fix wrong quotation marks: straight double quotes adjacent to letters
    # or digits are rewritten as typographic quotes.
    text = re.sub(ur'"([a-zA-ZàòùèéìÒÀÙÈÉÌ0-9])', ur'“\1', text, count=0)
    text = re.sub(ur'([a-zA-ZàòùèéìÒÀÙÈÉÌ0-9\.?!])"', ur'\1”', text, count=0)
    # From http://stackoverflow.com/questions/15120346/emacs-setting-comment-character-by-file-extension
    PrependStream(editor.tempfile, '# ').write(u'-*- coding: utf-8; comment-start: "#"; -*-\n')
    PrependStream(editor.tempfile, '# ').write(u'Quote: %s\n' % (reading.quote))
    editor.tempfile.write(u'\n')
    editor.tempfile.write(text)
    editor.tempfile.write(u'\n')
    PrependStream(editor.tempfile, '# ').write(u'Useful characters: “”–\n\n')
    try:
        # Psalm numbering differs between editions; convert before lookup.
        converted_quote = convert_quote_psalm_numbering(reading.quote, False)
        bible_text = bible_query.get_text(decode_quote(converted_quote, allow_only_chap=True))
    except:
        PrependStream(editor.tempfile, '# ').write(u'Quote: %s\nCould not retrieve bible text\n' % (reading.quote))
        print decode_quote(reading.quote, allow_only_chap=True)
        raise
    else:
        # Normalize per-line whitespace in the retrieved Bible text.
        bible_text = "\n".join(map(lambda x: x.strip(), bible_text.split('\n')))
        PrependStream(editor.tempfile, '# ').write(u'Quote: %s\nConverted quote: %s\nBible text:\n\n%s' % (reading.quote, converted_quote, bible_text))
    editor.edit()
    # Strip the '#' comment lines; what remains is the new reading text.
    new_text = u''.join(filter(lambda x: not x.startswith(u'#'), editor.edited_content)).strip() + u'\n'
    # Only ask for confirmation when the text actually changed.
    if editor.confirmation_request(new_text != reading.text):
        reading.text = new_text
        session.commit()
    else:
        session.rollback()
def tag_stock_code_to_news():
    """Tag related stock codes onto recent, untagged stock news.

    (Original docstring: "증권뉴스에 관련종목 태깅" -- tag related stocks on
    securities news.)  Failures are logged and reported to Slack.
    """
    # ROBUSTNESS: create the session before the try block so the except/
    # finally clauses can safely call rollback()/close() even when session
    # construction itself fails (previously `session` could be unbound).
    session = Session()
    try:
        start_time = time.time()
        stock_list = session.query(종목_마스터.종목코드, 종목_마스터.종목이름).all()
        # News from the last hour that has no stock code assigned yet.
        stock_news = session.query(StockNews).filter(
            StockNews.종목코드 == None,
            StockNews.created_at >= (datetime.now(SEOUL_TZ) -
                                     timedelta(hours=1))).all()
        affected_rows = 0
        for news in stock_news:
            try:
                # First stock whose name appears in the news title, if any.
                stock = next(
                    (stock for stock in stock_list if stock[1] in news.title),
                    None)
                if stock:
                    logger.info('news: {} => {}'.format(news.title, stock[1]))
                    news.종목코드 = stock[0]
                    news.modified_at = datetime.now(SEOUL_TZ)
                    affected_rows += 1
            except Exception:
                logger.exception('tag_stock_code_to_news', exc_info=True)
        session.commit()
        execution_time = time.time() - start_time
        logger.info('execution_time: {}'.format(execution_time))
        logger.info('{} rows added'.format(affected_rows))
    except Exception as e:
        logger.exception('tag_stock_code_to_news', exc_info=True)
        session.rollback()
        slack.send_message('BATCH:tag_stock_code_to_news fail {}'.format(e))
    finally:
        session.close()
def load(file_):
    """Load CVE rows with bounty amounts from a csv file into the database.

    Each csv row is (cve id, product, bounty amount); CVSS details come
    from NVD, rows missing from NVD are skipped with a warning, and
    duplicate ids are rolled back.
    """
    nvdxml = utilities.NvdXml()
    session = Session()
    reader = csv.reader(file_)
    next(reader, None)  # Ignoring the header
    # Cvss attribute name -> key in the NVD details dict.
    cvss_fields = (
        ('access_complexity', 'access-complexity'),
        ('access_vector', 'access-vector'),
        ('authentication', 'authentication'),
        ('availability_impact', 'availability-impact'),
        ('confidentiality_impact', 'confidentiality-impact'),
        ('integrity_impact', 'integrity-impact'),
        ('score', 'score'),
        ('exploitability_subscore', 'exploitability-subscore'),
        ('impact_subscore', 'impact-subscore'),
    )
    for row in reader:
        debug(row)
        cve = Cve(id=row[0], year=utilities.get_year(row[0]), product=row[1])
        nvd_details = nvdxml.get_details(cve.id)
        if not nvd_details:
            warning('{} was not found in NVD.'.format(cve.id))
            continue
        cve.cvss = Cvss()
        for attr, key in cvss_fields:
            setattr(cve.cvss, attr, nvd_details[key])
        # Bounty amount arrives as a currency string, e.g. "$1,234".
        cve.bounty = Bounty()
        cve.bounty.amount = float(row[2].replace('$', '').replace(',', ''))
        session.add(cve)
        try:
            session.commit()
        except sqlalchemy.exc.IntegrityError:
            error('{} is a duplicate.'.format(cve.id))
            session.rollback()
def main():
    """Print the readings of one or more masses as commented text plus
    machine-readable quote lists.

    Accepts either a single mass id or a (year, month, day) triple on the
    command line.
    """
    session = Session()
    if len(sys.argv[1:]) == 1:
        mass_id = int(sys.argv[1])
        masses = [session.query(Mass).filter(Mass.id == mass_id).one()]
    elif len(sys.argv[1:]) == 3:
        year, month, day = map(int, sys.argv[1:])
        lit_years = {}
        lit_date = get_lit_date(datetime.date(year, month, day), lit_years, session)
        masses = lit_date.get_masses(strict=True)
    else:
        print >> sys.stderr, "Wrong number of arguments"
        sys.exit(1)
    # Human-readable part is emitted as '#' comments.
    fout = PrependStream(sys.stdout, '# ')
    for mass in sorted(masses, key=lambda x: x.order):
        num_reading = max(map(lambda x: x.order, mass.readings)) + 1
        quotes = []
        alt_quotes = []
        print >> fout, "Mass #%d (%s) in event %s - ID: %d" % (mass.order, mass.title, mass.event.title, mass.id)
        for reading in sorted(mass.readings, key=lambda x: (x.order, x.alt_num)):
            print >> fout, " Lettura #%d.%d (%s): %s - ID: %d" % (reading.order, reading.alt_num, reading.title, reading.quote, reading.id)
        for i in xrange(num_reading):
            # Exactly one primary (alt_num == 0) reading per order is assumed.
            [reading] = filter(lambda x: x.order == i and x.alt_num == 0, mass.readings)
            if reading.only_on_sunday:
                # NOTE(review): assumes a non-Sunday-only reading with a
                # lower order was already appended, so alt_quotes[0] exists;
                # IndexError otherwise -- confirm this invariant.
                alt_quotes[0].append(reading.quote)
                continue
            quotes.append(reading.quote)
            alt_quotes.append(map(lambda x: x.quote, sorted(filter(lambda x: x.order == i and x.alt_num > 0, mass.readings), key=lambda x: x.alt_num)))
        sys.stdout.write("citazioni: %s\n" % (json.dumps(quotes)))
        sys.stdout.write("citazioni_alt: %s\n" % (json.dumps(alt_quotes)))
    # Read-only script: discard the transaction.
    session.rollback()
def add():
    """
    Adds the given text to the database for a user, labeled with the
    given label text, and re-fits their classifier.
    """
    try:
        uid = verify_id_token(request.form['id_token'])
    except KeyError:
        return "id_token required", status.HTTP_400_BAD_REQUEST
    except ValueError:
        return "id_token unrecognized", status.HTTP_400_BAD_REQUEST
    except auth.AuthError as exc:
        if exc.code == 'ID_TOKEN_REVOKED':
            return "id_token revoked", status.HTTP_400_BAD_REQUEST
        return "id_token invalid", status.HTTP_400_BAD_REQUEST
    try:
        label = request.form['label']
    except KeyError:
        return "label required", status.HTTP_400_BAD_REQUEST
    try:
        text = request.form['text']
    except KeyError:
        return "text required", status.HTTP_400_BAD_REQUEST
    session = Session()
    try:
        dal.add_labeled_text(session, uid, label, text)
        session.commit()
    except:
        session.rollback()
        raise
    finally:
        session.close()
        Session.remove()
    # Refit so future predictions see the new labeled example.
    classifier.fit(uid)
    return "Labeled text added for user", status.HTTP_202_ACCEPTED
temp = int(i[3][j]) if (temp != 0): locations_applicable.append(locations[temp]) print(locations_applicable) #Record for table "craving_tips"; A Craving_Tip object record1 = CravingTip( **{ 'cz_tips': i[0], 'en_tips': i[1], 'methods': i[2], # 'location': i[3], # 'time': i[4], }) #Record for table "locations"; A "Location" Class object # TODO: fetch applicable locations locs = s.query(Location).filter( Location.name.in_(locations_applicable)).all() print(locs.name) # TODO: relate fetched locations to inserted tip record record1.locations.extend(locs) s.commit() # Attempt to commit all the records except Exception as e: print("\nblabla\n", str(e)) s.rollback() # Rollback the changes on error finally: s.close() # Close the connection
def get_quiz(course):
    '''Navigates to quiz

    Gets all quiz: inserts unseen quizzes (with a notification) and updates
    changed ones (also with a notification).  Commits on success, rolls back
    and re-raises on error, always closes the session.
    '''
    session = Session()
    try:
        r = s.get('https://edux.pjwstk.edu.pl/Quiz.aspx')
        r.raise_for_status()
        # quiz = []
        for (quiz_id, title, start_at, finish_at, duration,
             score) in extract_quiz(r.content):
            quiz = session.query(Quiz). \
                filter_by(quiz_id=quiz_id). \
                first()
            if quiz is None:
                # First time we see this quiz: store it and notify.
                quiz = Quiz(
                    course=course,
                    quiz_id=quiz_id,
                    title=title,
                    start_at=start_at,
                    finish_at=finish_at,
                    duration=duration,
                    score=score
                )
                session.add(quiz)
                print u'New quiz "{0}" {1} - {2}'.format(
                    quiz.title, quiz.start_at, quiz.finish_at)
                send_notify(u'Quiz "{0.title}" at {1.title}'.format(quiz, course),
                            u'''Quiz title: {0.title}
Course: {1.title}
Start: {0.start_at}
Finish: {0.finish_at}
Duration: {0.duration}
Score: {0.score}
'''.format(quiz, course))
            # Any field differing from the scraped values means the quiz
            # changed (for a freshly added quiz nothing differs).
            if (quiz.title != title or quiz.start_at != start_at or
                    quiz.finish_at != finish_at or
                    quiz.duration != duration or quiz.score != score):
                send_notify(u'Quiz "{0.title}" changed'.format(quiz, course),
                            u'''Quiz title: {new[title]} (old: {0.title})
Course: {1.title}
Start: {new[start_at]} (old: {0.start_at})
Finish: {new[finish_at]} (old: {0.finish_at})
Duration: {new[duration]} (old: {0.duration})
Score: {new[score]} (old: {0.score})
'''.format(quiz, course, new={'title': title, 'start_at': start_at,
                              'finish_at': finish_at, 'duration': duration,
                              'score': score}))
                quiz.title = title
                quiz.start_at = start_at
                quiz.finish_at = finish_at
                quiz.duration = duration
                quiz.score = score
                session.add(quiz)
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def edit_month(year, month, single_day=None):
    """Interactively edit the liturgical events of a month (or one day).

    Each day is dumped to an editor buffer as commented human-readable text
    plus an editable JSON list, delimited by '---===---'.  After editing,
    the JSON is parsed back; on a parse error the user may re-edit.  The
    final commit happens only after user confirmation.
    """
    session = Session()
    bible_query = BibleQuery()
    lit_years = {}
    editor = Editor()
    # From http://stackoverflow.com/questions/15120346/emacs-setting-comment-character-by-file-extension
    PrependStream(editor.tempfile, '# ').write(u'-*- coding: utf-8; comment-start: "#"; -*-\n')
    editor.tempfile.write(u'\n')

    def push_day(day):
        # Append one day's commented summary + JSON block to the buffer.
        date = datetime.date(year, month, day)
        lit_date = get_lit_date(date, lit_years, session)
        events = map(lambda x: x[1], lit_date.competitors)
        print_lit_date(lit_date, PrependStream(editor.tempfile, u'# '), with_id=True)
        editor.tempfile.write(u'\n')
        editor.tempfile.write(json.dumps(map(lambda x: x.as_dict(), events), ensure_ascii=False, indent=2, sort_keys=True) + u'\n')
        editor.tempfile.write(u'---===---\n')
        editor.tempfile.write(u'\n')

    if single_day is not None:
        push_day(single_day)
    else:
        for day in real_itermonthdays(year, month):
            push_day(day)
    editor.edit()
    while True:
        # Drop comment lines; what remains alternates JSON bodies and
        # '---===---' separators.
        lines = filter(lambda x: not x.startswith(u'#'), editor.edited_content)
        buf = u''
        try:
            for line in lines:
                if line.strip() == u'---===---':
                    data = json.loads(buf)
                    for piece in data:
                        from_dict(piece, session)
                    buf = u''
                else:
                    buf += line
            session.flush()
        except:
            # Parsing/flush failed: offer to re-edit the same content.
            traceback.print_exc()
            sys.stdout.write("Error while parsing new content. Re-edit? [Y/n] ")
            answer = sys.stdin.readline().strip()
            if answer != '':
                answer = answer[0]
            if answer == 'n' or answer == 'N':
                sys.stdout.write("Aborting\n")
                sys.exit(0)
            else:
                sys.stdout.write("Re-editing...\n")
                session.rollback()
                # Reopen a fresh editor pre-filled with the previous edit.
                edited_content = editor.edited_content
                editor = Editor()
                editor.tempfile.write("".join(edited_content))
                editor.edit()
        else:
            break
    if editor.confirmation_request(session_has_pending_commit(session)):
        #reading.text = new_text
        session.commit()
    else:
        session.rollback()