def atomfeed():
    """Serve the 20 most recent published entries as an Atom feed."""
    feed_sql = ('select title, body, published, slug from entries '
                'where published is not null order by published desc limit 20')
    with db.db_conn(config.DATABASE_NAME) as conn:
        entries = conn.execute_fetch(feed_sql)
        if not entries:
            abort(404, 'Not found')
        response.content_type = 'application/atom+xml'
        return template('view/atom', articles=entries)
def draft(slug):
    # Publish or update a blog entry from a draft file in ~/.blog/<slug>.
    # File format: line 1 = title, last line = 'published' marker (optional),
    # everything in between = body.
    try:
        homedir = '/home/mrshoe'
        blogdir = os.path.join(homedir, '.blog')
        # NOTE(review): the file handle is never closed — consider
        # `with open(...)`.
        f = open(os.path.join(blogdir, slug))
        lines = f.readlines()
        title = lines[0].strip()
        # The trailing 'published' marker toggles publication.
        published = lines[-1].strip().lower() == 'published'
        # Drop the marker line from the body when present.
        bodylines = lines[1:-1] if published else lines[1:]
        body = ''.join(bodylines)
        with db.db_conn(config.DATABASE_NAME) as conn:
            article = conn.execute_fetch('select published from entries where slug=%s', (slug.strip(),))
            if len(article) > 1:
                abort(500, 'Multiple entries with that slug')
            elif len(article) == 1:
                # Entry exists: update it, adjusting the published timestamp
                # only on the unpublished -> published transition.
                if published:
                    if article[0][0] is not None:
                        # already published
                        conn.execute('update entries set title=%s, body=%s where slug=%s', (title, body, slug))
                    else:
                        conn.execute('update entries set title=%s, body=%s, published=now() where slug=%s', (title, body, slug))
                else:
                    # Un-publish: clear the published timestamp.
                    conn.execute('update entries set title=%s, body=%s, published=null where slug=%s', (title, body, slug))
            else:
                # New entry: insert, published now or left NULL (draft).
                if published:
                    conn.execute('insert into entries values (default, %s, %s, now(), %s)', (title, body, slug))
                else:
                    conn.execute('insert into entries values (default, %s, %s, NULL, %s)', (title, body, slug))
        # Preview the entry regardless of publication state.
        return template('view/blog', articles=[(title, body, datetime.datetime.now(), slug)], next=None, previous=None)
    except IOError:
        abort(404, 'No such entry')
def atomfeed():
    """Render the Atom feed for the newest published articles."""
    with db.db_conn(config.DATABASE_NAME) as conn:
        recent = conn.execute_fetch(
            'select title, body, published, slug from entries '
            'where published is not null order by published desc limit 20')
        # No published entries at all -> nothing to serve.
        if not recent:
            abort(404, 'Not found')
        response.content_type = 'application/atom+xml'
        return template('view/atom', articles=recent)
def get_url_from_db(sql):
    """Run *sql* against the configured database and return the rows as a list.

    Bug fixed: the original called ``conn.db_close()`` *after* ``return``,
    so the close was dead code and the connection leaked.
    """
    conn = db_conn(df['host'], df['port'], df['user'], df['password'],
                   df['database'], df['charset'])
    try:
        return list(conn.db_select(sql))
    finally:
        # Always release the connection (was unreachable in the original).
        conn.db_close()
def comment(year, month, day, slug):
    """Store a submitted comment for an article, then reload the page."""
    with db.db_conn(config.DATABASE_NAME) as conn:
        articles = get_article(conn, year, month, day, slug)
        # Bot filter: only accept the comment when the JS-set field checks out.
        if request.forms['jstest'] == 'valid':
            entry_id = articles[0][4]
            who = request.forms['commenter'].strip()
            text = request.forms['comment'].strip()
            conn.execute(
                "insert into comments values (default, %s, %s, %s, now() at time zone 'US/Pacific')",
                (entry_id, who, text))
    redirect(request.path)
def check():
    """Compare each MS specimen that has an iDigBio uuid against its iDigBio
    record and write rows whose key fields disagree to
    mislinked_specimens_it.csv.

    Fix: ``DataFrame.append`` (deprecated in pandas 1.4, removed in 2.0) is
    replaced with ``pandas.concat``.
    """
    conn = db.db_conn()
    c = conn.cursor()
    sql = """
        SELECT * FROM `ms_specimens` AS s
        LEFT JOIN ms_specimens_x_taxonomy AS x ON x.specimen_id = s.specimen_id
        LEFT JOIN ms_taxonomy_names AS n ON n.alt_id = x.alt_id
    """
    r = db.db_execute(c, sql)
    mislinked = pandas.DataFrame(columns=[
        'specimen_id', 'MS_institution_code', 'iDB_institution_code',
        'MS_collection_code', 'iDB_collection_code', 'MS_catalog_number',
        'iDB_catalog_number', 'MS_genus', 'iDB_genus', 'MS_species',
        'iDB_species'
    ])
    # mislinked = pandas.read_csv('mislinked_specimens.csv')
    for s in r:
        if s['uuid']:
            # Skip specimens already recorded (supports resuming from a CSV).
            if int(s['specimen_id']) in list(mislinked['specimen_id']):
                continue
            print(s['uuid'])
            resp = requests.get('https://search.idigbio.org/v2/view/records/' + s['uuid'])
            json = resp.json()
            # check institution code, collection code, specimen number, genus, and species
            if (get_db_field(s, 'institution_code') != get_json_field(json, ['indexTerms', 'institutioncode']).lower()
                    or get_db_field(s, 'collection_code') != get_json_field(json, ['indexTerms', 'collectioncode']).lower()
                    or get_db_field(s, 'catalog_number') != get_json_field(json, ['indexTerms', 'catalognumber']).lower()
                    or get_db_field(s, 'genus') != get_json_field(json, ['indexTerms', 'genus']).lower()
                    or get_db_field(s, 'species') != get_json_field(json, ['indexTerms', 'specificepithet']).lower()):
                print('Found mislinked specimen')
                row = {
                    'specimen_id': s['specimen_id'],
                    'MS_institution_code': s['institution_code'],
                    'MS_collection_code': s['collection_code'],
                    'MS_catalog_number': s['catalog_number'],
                    'MS_genus': s['genus'],
                    'MS_species': s['species'],
                    'iDB_institution_code': get_json_field(json, ['indexTerms', 'institutioncode']),
                    'iDB_collection_code': get_json_field(json, ['indexTerms', 'collectioncode']),
                    'iDB_catalog_number': get_json_field(json, ['indexTerms', 'catalognumber']),
                    'iDB_genus': get_json_field(json, ['indexTerms', 'genus']),
                    'iDB_species': get_json_field(json, ['indexTerms', 'specificepithet'])
                }
                # DataFrame.append was removed in pandas 2.0; use concat.
                mislinked = pandas.concat([mislinked, pandas.DataFrame([row])],
                                          ignore_index=True)
    mislinked.to_csv('mislinked_specimens_it.csv', index=False, index_label=False)
def get(self):
    """Full-text search endpoint: return matching rows with fixed pagination."""
    args = self.parser.parse_args(strict=True)
    qry = sa.sql.text(self.fulltext_qry)
    # AND together the whitespace-separated query terms.
    findme = ' & '.join(args['q'].split())
    rows = db_conn().execute(qry, findme=findme).fetchall()
    results = [dict(row) for row in rows]
    pagination = {
        'per_page': 20,
        'page': 1,
        'pages': 1,
        'count': len(results),
    }
    return {
        "api_version": "0.2",
        "pagination": pagination,
        "results": results,
    }
def func_wrapper(*args, **kwargs):
    # Auth guard wrapper: only invoke the wrapped view (`func`, bound in the
    # enclosing decorator scope) when the session credentials resolve to a
    # real user; otherwise bounce to the login page.
    if 'email' in session and 'pw' in session:
        with db_conn() as conn:
            # session['pw'] holds the hashed password, not plaintext.
            user = users.get_user_by_email_hash(conn, session['email'], session['pw'])
            if user:
                # Inject the authenticated user and the open connection
                # into the wrapped view's kwargs.
                kwargs['user'] = user
                kwargs['conn'] = conn
                return func(*args, **kwargs)
    return redirect(url_for('login'))
def summary():
    """Render the summary page of per-user snapshots and weight-loss stats."""
    with db_conn() as conn:
        snapshots = users.get_user_snapshots_by_user(conn)
        weightloss = users.get_user_weightloss_by_user(conn)
        # Union of every uid that appears in either result set.
        user_ids = {row.uid for row in snapshots} | {row.uid for row in weightloss}
        return render_template('summary.html', uids=user_ids, title='Summary',
                               us_by_user=snapshots, wl_by_user=weightloss)
def view():
    """Build the 'view table' page with a dropdown of every DB table.

    Fixes:
    - the list comprehension shadowed ``table_list`` with its own loop
      variable (confusing and fragile) — a distinct name is used now;
    - the cursor is closed before the connection (the original closed the
      connection first), and cleanup runs even if the query fails.
    """
    tables_select = ViewTables()
    conn = db.db_conn()
    cursor = conn.cursor()
    try:
        cursor.execute("SHOW TABLES")
        table_list = cursor.fetchall()
    finally:
        cursor.close()
        conn.close()
    # Each fetched row is a 1-tuple (table_name,); use it as value and label.
    tables_select.tables.choices = [("", "---")] + [(row[0], row[0]) for row in table_list]
    return render_template('view.html', tables_select=tables_select, heading="View")
def article(year, month, day, slug):
    # Single-article page: the article plus its comment thread and the
    # next/previous navigation links.
    with db.db_conn(config.DATABASE_NAME) as conn:
        articles = get_article(conn, year, month, day, slug)
        # articles[0][4] is the entry id; articles[0][2] the published date.
        comments = conn.execute_fetch(
            'select commenter, comment, tstamp from comments where entryid=%s order by tstamp',
            (articles[0][4], ))
        nxt, prev = get_next_prv(conn, articles[0][2])
        return template('view/blog',
                        articles=articles,
                        comments=comments,
                        comment_counts={},
                        single_post=True,
                        next=nxt,
                        previous=prev)
def get_crn(semester):
    """Return JSON listing {class_id, CRN} pairs for *semester*.

    Fixes:
    - *semester* was interpolated directly into the SQL string
      (SQL injection) — it is now a bound parameter;
    - the cursor is closed before the connection (order was reversed).
    """
    conn = db.db_conn()
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT class_id, CRN FROM Class WHERE semester = %s;",
                       (semester,))
        rows = cursor.fetchall()
    finally:
        cursor.close()
        conn.close()
    crn_array = [{'class_id': class_id, 'CRN': crn} for class_id, crn in rows]
    return jsonify({'CRNs': crn_array})
def upload(entryid=None):
    # Create (no id) or update (with id) a blog entry from a JSON payload.
    # Access is gated by a shared-secret header.
    if request.get_header('X-Maximal') != config.BLOG_PASSWORD:
        abort(403, 'Forbidden')
    title = request.json.get('title')
    body = request.json.get('body')
    # Slugify the title: whitespace -> '-', then drop non-word characters.
    # NOTE(review): these patterns should be raw strings (r'\s+', r'[^\w-]')
    # — '\s'/'\w' in plain strings are invalid escape sequences.
    slug = re.sub('\s+', '-', title.strip().lower())
    slug = re.sub('[^\w-]', '', slug)
    with db.db_conn(config.DATABASE_NAME) as conn:
        response.content_type = 'text/json'
        if entryid is None:
            # New entry: insert and echo back the generated id.
            inserted = conn.execute_fetchone('insert into entries values (default, %s, %s, now(), %s) returning id', (title, body, slug))
            return json.dumps({'id':inserted[0]})
        else:
            entryid = int(entryid)
            conn.execute('update entries set title=%s, body=%s, slug=%s where id=%s', (title, body, slug, entryid))
            return json.dumps({'id':entryid})
def login():
    """Log a user in from query-string credentials, or show the login form."""
    email = request.args.get('email')
    pw = request.args.get('pw')
    if email and pw:
        with db_conn() as conn:
            user = users.get_user_by_credentials(conn, email, pw)
            return_path = request.args.get('ref')
            if user:
                session['email'] = email
                # Store only the hashed password in the session.
                session['pw'] = user.password
                if return_path:
                    return redirect(return_path)
                return redirect(url_for('profile'))
    return render_template('login.html', title='Login')
def get_strats(color_passive="red", color_active="blue"):
    """Return (strategy_names, colors): one color per method based on type.

    Fix: the cursor and connection are now closed — the original leaked both.
    """
    conn = db.db_conn()
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT method_name, type FROM Methods")
        result = cursor.fetchall()
    finally:
        cursor.close()
        conn.close()
    strats = []
    color = []
    for method_name, method_type in result:
        strats.append(method_name)
        # NOTE(review): 'P' is matched upper-case but 'a' lower-case —
        # presumably mirroring the stored Methods.type values; confirm.
        if method_type == "P":
            color.append(color_passive)
        elif method_type == "a":
            color.append(color_active)
        else:
            color.append("k")  # fallback color for unknown types
    return strats, color
def remove():
    # Admin page: remove a whole class (and every dependent row) or a single
    # exam from a class.
    # NOTE(review): every query below interpolates form data with str.format
    # — SQL injection risk; these should use bound parameters.
    conn = db.db_conn()
    cursor = conn.cursor()
    cursor.execute("SELECT DISTINCT semester FROM Class")
    semesters = cursor.fetchall()
    remove_class_form = GenForm()
    # Rows are 1-tuples; the comprehension's loop variable shadows
    # `semesters` (Python 3 comprehension scope keeps the outer name intact).
    remove_class_form.semester.choices = [("", "---")] + [(semesters[0], semesters[0]) for semesters in semesters]
    remove_class_form.CRN.choices = [("", "---")]
    remove_class_CRN = remove_class_form.CRN.data
    remove_exam_form = GenForm()
    remove_exam_form.semester.choices = [("", "---")] + [(semesters[0], semesters[0]) for semesters in semesters]
    remove_exam_form.CRN.choices = [("", "---")]
    remove_exam_form.exam.choices = [("", "---")]
    remove_exam_CRN = remove_exam_form.CRN.data
    remove_exam_exam = remove_exam_form.exam.data
    if "remove_class" in request.form:
        remove_class = remove_class_CRN
        # Fetch the class row first so the flash message can describe it.
        cursor.execute("SELECT * FROM Class WHERE class_id = {}".format(remove_class))
        confirm = cursor.fetchall()[0]
        # Delete dependents first to satisfy FK constraints, then the class.
        cursor.execute("DELETE FROM Responses Where class_id = {}".format(remove_class))
        cursor.execute("DELETE FROM methods_used Where class_id = {}".format(remove_class))
        cursor.execute("DELETE FROM Exam Where class_id = {}".format(remove_class))
        cursor.execute("DELETE FROM Student WHERE Class_id = {}".format(remove_class))
        cursor.execute("DELETE FROM Class Where class_id = {}".format(remove_class))
        flash("Removed class {} {} {} {} {}".format(confirm[0], confirm[1], confirm[2], confirm[3], confirm[4]), 'cat5')
        conn.commit()
    elif "remove_exam" in request.form:
        # Resolve the exam id from (class, exam number), then delete its
        # dependents and the exam itself.
        cursor.execute("SELECT exam_id FROM Exam WHERE class_id = {} AND exam_num = {}".format(remove_exam_CRN, remove_exam_exam))
        remove_exam = cursor.fetchall()[0][0]
        cursor.execute("SELECT * FROM Class WHERE class_id = {}".format(remove_exam_CRN))
        confirm = cursor.fetchall()[0]
        print(remove_exam)
        cursor.execute("DELETE FROM Responses Where exam_num = {} AND class_id = {}".format(remove_exam_exam, remove_exam_CRN))
        cursor.execute("DELETE FROM methods_used Where exam_id = {} AND class_id = {}".format(remove_exam_exam, remove_exam_CRN))
        cursor.execute("DELETE FROM Exam Where exam_id = {}".format(remove_exam))
        flash("Removed exam from {} {} {} {} {}".format(confirm[0], confirm[1], confirm[2], confirm[3], confirm[4]), 'cat6')
        conn.commit()
    cursor.close()
    conn.close()
    return render_template('remove.html', remove_class_form=remove_class_form, remove_exam_form=remove_exam_form, heading="Remove")
def get_test(class_id):
    """Return JSON listing (exam_id, exam_num) for every exam of a class.

    Fixes:
    - *class_id* is now a bound parameter (was str.format interpolation —
      SQL injection);
    - the cursor is closed before the connection; the debug print is gone.
    """
    conn = db.db_conn()
    cursor = conn.cursor()
    try:
        cursor.execute(
            "select DISTINCT exam_id, exam_num from Exam "
            "INNER JOIN Class ON Class.class_id = Exam.class_id "
            "WHERE Class.class_id = %s", (class_id,))
        tests = cursor.fetchall()
    finally:
        cursor.close()
        conn.close()
    tests_array = [{'exam_id': exam_id, 'exam_num': exam_num}
                   for exam_id, exam_num in tests]
    return jsonify({'tests': tests_array})
def draft(slug):
    # Publish/update a blog entry from the draft file ~/.blog/<slug>:
    # first line = title, optional trailing 'published' line toggles
    # publication, everything between is the body.
    try:
        homedir = '/home/mrshoe'
        blogdir = os.path.join(homedir, '.blog')
        # NOTE(review): file handle is never closed — use `with open(...)`.
        f = open(os.path.join(blogdir, slug))
        lines = f.readlines()
        title = lines[0].strip()
        published = lines[-1].strip().lower() == 'published'
        # Exclude the marker line from the body when it is present.
        bodylines = lines[1:-1] if published else lines[1:]
        body = ''.join(bodylines)
        with db.db_conn(config.DATABASE_NAME) as conn:
            article = conn.execute_fetch(
                'select published from entries where slug=%s',
                (slug.strip(), ))
            if len(article) > 1:
                abort(500, 'Multiple entries with that slug')
            elif len(article) == 1:
                # Existing entry: set the published timestamp only on the
                # draft -> published transition; clear it on un-publish.
                if published:
                    if article[0][0] is not None:
                        # already published
                        conn.execute(
                            'update entries set title=%s, body=%s where slug=%s',
                            (title, body, slug))
                    else:
                        conn.execute(
                            'update entries set title=%s, body=%s, published=now() where slug=%s',
                            (title, body, slug))
                else:
                    conn.execute(
                        'update entries set title=%s, body=%s, published=null where slug=%s',
                        (title, body, slug))
            else:
                # New entry: insert as published-now or as a NULL draft.
                if published:
                    conn.execute(
                        'insert into entries values (default, %s, %s, now(), %s)',
                        (title, body, slug))
                else:
                    conn.execute(
                        'insert into entries values (default, %s, %s, NULL, %s)',
                        (title, body, slug))
        # Render a preview of the entry either way.
        return template('view/blog',
                        articles=[(title, body, datetime.datetime.now(), slug)],
                        next=None,
                        previous=None)
    except IOError:
        abort(404, 'No such entry')
def signup():
    """Create an account from query-string fields, or show the signup form."""
    email = request.args.get('email')
    pw = request.args.get('pw')
    first_name = request.args.get('first_name')
    last_name = request.args.get('last_name')
    # All four fields must be present to attempt account creation.
    if email and pw and first_name and last_name:
        with db_conn() as conn:
            try:
                if users.create_user(conn, first_name, last_name, email, pw):
                    return redirect('/profile')
                return 'Unexpected error'
            except ValueError:
                # create_user signals a duplicate account with ValueError.
                return render_template('signup.html', title='Signup',
                                       error='User already exists!')
        return redirect('/profile')
    return render_template('signup.html', title='Signup')
def upload(entryid=None):
    """Create (no id) or update (with id) a blog entry from a JSON payload.

    Access is gated by the shared-secret X-Maximal header.

    Fix: the regex patterns are now raw strings — ``'\\s'``/``'\\w'`` in
    plain strings are invalid escape sequences (DeprecationWarning today,
    an error in future Python versions).
    """
    if request.get_header('X-Maximal') != config.BLOG_PASSWORD:
        abort(403, 'Forbidden')
    title = request.json.get('title')
    body = request.json.get('body')
    # Slugify the title: whitespace -> '-', then drop non-word characters.
    slug = re.sub(r'\s+', '-', title.strip().lower())
    slug = re.sub(r'[^\w-]', '', slug)
    with db.db_conn(config.DATABASE_NAME) as conn:
        response.content_type = 'text/json'
        if entryid is None:
            # New entry: insert and echo back the generated id.
            inserted = conn.execute_fetchone(
                'insert into entries values (default, %s, %s, now(), %s) returning id',
                (title, body, slug))
            return json.dumps({'id': inserted[0]})
        else:
            entryid = int(entryid)
            conn.execute(
                'update entries set title=%s, body=%s, slug=%s where id=%s',
                (title, body, slug, entryid))
            return json.dumps({'id': entryid})
def get_table(table):
    """Return the full contents of *table* as JSON (list of column->value dicts).

    Fixes:
    - *table* is an identifier and cannot be bound as a query parameter, so
      it is validated against a strict ``\\w+`` pattern before interpolation
      (the original formatted it in unchecked — SQL injection);
    - the table name in the information_schema query is a bound parameter;
    - the cursor is closed before the connection.
    """
    if not re.fullmatch(r'\w+', table):
        raise ValueError('invalid table name')
    conn = db.db_conn()
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT * FROM StudyStrategies1.{}".format(table))
        table_vals = cursor.fetchall()
        cursor.execute(
            "SELECT COLUMN_NAME FROM information_schema.columns "
            "WHERE TABLE_SCHEMA = 'StudyStrategies1' AND TABLE_NAME = %s;",
            (table,))
        table_cols = cursor.fetchall()
    finally:
        cursor.close()
        conn.close()
    # Zip each row up with its column names.
    test_array = [
        {table_cols[j][0]: row[j] for j in range(len(table_cols))}
        for row in table_vals
    ]
    return jsonify(test_array)
def index():
    # Main "Analyze" page: semester/CRN/exam selectors, an optional file
    # upload, and on POST a graph generated by graph_test.
    form = GenForm()
    conn = db.db_conn()
    cursor = conn.cursor()
    cursor.execute("SELECT DISTINCT semester FROM Class")
    semesters = cursor.fetchall()
    # NOTE(review): connection closed before cursor — reverse order is
    # conventional, though most drivers tolerate this.
    conn.close()
    cursor.close()
    # Rows are 1-tuples; the comprehension's loop variable shadows
    # `semesters` (Python 3 comprehension scope keeps the outer name intact).
    form.semester.choices = [("", "---")] + [(semesters[0], semesters[0]) for semesters in semesters]
    form.CRN.choices = [("", "---")]
    form.exam.choices = [("", "---")]
    semester = form.semester.data
    class_id = form.CRN.data
    exam = form.exam.data
    color_passive = form.color_passive.data
    color_active = form.color_active.data
    graph_type = form.type.data
    filename = ''
    if form.validate_on_submit():
        # File-upload path: save the file and reload the page.
        upload = form.upload.data
        filename = secure_filename(upload.filename)
        upload.save(os.path.join(app.instance_path, 'upload', filename))
        return redirect(url_for('index'))
    if request.method == 'POST':
        # graph_type chooses the grapher: '1' = per-class, '2' = per-semester,
        # '3' = per-exam with custom colors.
        if graph_type == '1':
            filename = graph_test.test1(class_id)
        elif graph_type == '2':
            filename = graph_test.test2(semester)
        elif graph_type == '3':
            filename = graph_test.test(exam, class_id, color_passive, color_active)
    print(filename)
    return render_template('index.html', form=form, heading="Analyze", filename=filename)
def blog(page=0):
    """Render one page of published blog entries, newest first.

    Fix: ``dict.iteritems()`` is Python 2 only — replaced with ``items()``.
    """
    with db.db_conn(config.DATABASE_NAME) as conn:
        page = int(page)
        articles = conn.execute_fetch(
            'select title, body, published, slug, id from entries where published is not null order by published desc limit %s offset %s',
            (config.ARTICLES_PER_PAGE, page * config.ARTICLES_PER_PAGE))
        if not articles:
            abort(404, 'Not found')
        article_count = conn.execute_fetchone('select count(*) from entries')
        if not article_count:
            abort(404, 'Not found')
        # Navigation: page 0 is the newest; "next" walks toward newer pages.
        if page == 1:
            nxt = '/blog/'
        elif page > 1:
            nxt = '/blog/%d' % (page - 1)
        else:
            nxt = None
        prev = '/blog/%d' % (page + 1) if article_count[0] > (
            (page + 1) * config.ARTICLES_PER_PAGE) else None
        # Safe to interpolate: ids come straight from our own entries table.
        articleids = [str(a[4]) for a in articles]
        comment_counts = dict(
            conn.execute_fetch(
                'select entryid, count(*) from comments where entryid in (%s) group by entryid'
                % ','.join(articleids), ()))
        # Pre-format the counts for display (replacing values only, so
        # mutating while iterating items() is safe).
        for k, v in comment_counts.items():
            if v == 1:
                comment_counts[k] = '1 comment'
            else:
                comment_counts[k] = '%d comments' % v
        return template('view/blog',
                        articles=articles,
                        comments=[],
                        comment_counts=comment_counts,
                        single_post=False,
                        next=nxt,
                        previous=prev)
def blog(page=0):
    """Render one page of published blog entries, newest first.

    Fix: ``dict.iteritems()`` is Python 2 only — replaced with ``items()``.
    """
    with db.db_conn(config.DATABASE_NAME) as conn:
        page = int(page)
        articles = conn.execute_fetch('select title, body, published, slug, id from entries where published is not null order by published desc limit %s offset %s', (config.ARTICLES_PER_PAGE, page*config.ARTICLES_PER_PAGE))
        if not articles:
            abort(404, 'Not found')
        article_count = conn.execute_fetchone('select count(*) from entries')
        if not article_count:
            abort(404, 'Not found')
        # Navigation: page 0 is the newest page.
        if page == 1:
            nxt = '/blog/'
        elif page > 1:
            nxt = '/blog/%d' % (page-1)
        else:
            nxt = None
        prev = '/blog/%d' % (page+1) if article_count[0] > ((page+1) * config.ARTICLES_PER_PAGE) else None
        # Ids come from our own entries table, so interpolation is safe here.
        articleids = [str(a[4]) for a in articles]
        comment_counts = dict(conn.execute_fetch('select entryid, count(*) from comments where entryid in (%s) group by entryid' % ','.join(articleids), ()))
        # Replacing values only, so mutating while iterating items() is safe.
        for k, v in comment_counts.items():
            if v == 1:
                comment_counts[k] = '1 comment'
            else:
                comment_counts[k] = '%d comments' % v
        return template('view/blog', articles=articles, comments=[], comment_counts=comment_counts, single_post=False, next=nxt, previous=prev)
def add():
    # "Edit" page with three sub-forms: add a class (plus its Student rows),
    # add an exam to a class, and bulk-load grades from an uploaded workbook.
    # NOTE(review): throughout this view, form data is interpolated into SQL
    # with str.format — SQL injection risk; should use bound parameters.
    add_class_form = AddClassForm()
    CRN = add_class_form.CRN.data
    class_name = add_class_form.class_name.data
    class_num = add_class_form.class_num.data
    semester = add_class_form.semester.data
    students = add_class_form.students.data
    add_exam_form = GenForm()
    conn = db.db_conn()
    cursor = conn.cursor()
    cursor.execute("SELECT DISTINCT semester FROM Class")
    semesters = cursor.fetchall()
    # Rows are 1-tuples; the comprehension's loop variable shadows
    # `semesters` (py3 comprehension scope keeps the outer name intact).
    add_exam_form.semester.choices = [("", "---")] + [(semesters[0], semesters[0]) for semesters in semesters]
    add_exam_form.CRN.choices = [("", "---")]
    add_exam_CRN = add_exam_form.CRN.data
    add_grades_form = GenForm()
    add_grades_form.semester.choices = [("", "---")] + [(semesters[0], semesters[0]) for semesters in semesters]
    add_grades_form.CRN.choices = [("", "---")]
    add_grades_form.exam.choices = [("", "---")]
    upload_grade = add_grades_form.upload.data
    semester_grade = add_grades_form.semester.data
    class_id_grade = add_grades_form.CRN.data
    exam_grade = add_grades_form.exam.data
    if "add_class" in request.form:
        # NOTE(review): `is ""` / `is not 0` rely on interning and should be
        # == / != (SyntaxWarning on modern Python).
        if CRN is "" or class_name is "" or class_num is "" or semester is "" or students is "":
            flash("Input error", "cat3")
        elif not CRN.isdigit():
            flash("Invalid CRN", "cat3")
        elif not class_num.isdigit():
            flash("Invalid class number", "cat3")
        elif not students.isdigit():
            flash("Invalid student number", "cat3")
        else:
            cursor.execute(
                "INSERT INTO Class (CRN, class_name, class_num, semester)"
                "VALUES('{}', '{}', '{}', '{}')".format(CRN, class_name, class_num, semester))
            conn.commit()
            # Read back the generated class_id for the new class.
            cursor.execute(
                "SELECT class_id FROM Class WHERE CRN = '{}' AND class_name = '{}' AND class_num = '{}' AND semester "
                "= '{}'".format(
                    CRN, class_name, class_num, semester))
            class_id = cursor.fetchall()
            ids_list = []
            # Create one Student row per seat and collect the generated ids.
            for i in range(int(students)):
                cursor.execute("INSERT INTO Student (class_id) VALUES('{}')".format(class_id[0][0]))
                conn.commit()
                cursor.execute("SELECT student_identifier FROM Student Where class_id = {}".format(class_id[0][0]))
                id_tmp = cursor.fetchall()
                ids_list.append(id_tmp[i][0])
            students_range = str(ids_list[0]) + ' - ' + str(ids_list[len(ids_list) - 1])
            cursor.close()
            conn.close()
            flash("New Class ID: {} with student ids from {}".format(class_id[0][0], students_range), "cat4")
    elif "add_exam" in request.form:
        class_id = add_exam_CRN
        print(class_id)
        if class_id is "":
            flash("Input error", "cat1")
        else:
            # Next exam number = last existing exam_num + 1, capped at 3.
            cursor.execute("SELECT exam_num FROM Exam WHERE class_id = '{}'".format(
                class_id))
            exam_nums = cursor.fetchall()
            exam_num = 1
            if len(exam_nums) is not 0:
                exam_num = exam_nums[-1][0] + 1
            print(class_id)
            if exam_num > 3:
                flash('This class already has 3 exams', 'cat2')
            else:
                cursor.execute("INSERT INTO Exam (class_id, semester, exam_num) VALUES('{}', (SELECT semester FROM "
                               "Class WHERE class_id = {}), '{}')".format(class_id, class_id, exam_num))
                cursor.execute("SELECT exam_id FROM Exam WHERE class_id = '{}' AND exam_num = '{}'".format(
                    class_id, exam_num))
                exam_id = cursor.fetchall()
                flash("New Exam ID: {}".format(exam_id[0][0]), "cat1")
                conn.commit()
                conn.close()
                cursor.close()
    elif "add_grades" in request.form:
        # Save the uploaded workbook, read ids (col A) and grades (col B)
        # starting at row 2, and update each matching Responses row.
        filename = secure_filename(upload_grade.filename)
        path = os.path.join('grades', filename)
        upload_grade.save(path)
        wb = load_workbook(path)
        ws = wb.active
        max_row = ws.max_row
        ids_tmp = ws['A2:A' + str(max_row)]
        grades_tmp = ws['B2:B' + str(max_row)]
        ids = []
        grades = []
        for i in range(len(ids_tmp)):
            ids.append(ids_tmp[i][0].value)
        for i in range(len(grades_tmp)):
            grades.append(grades_tmp[i][0].value)
        grades_dict = dict(zip(ids, grades))
        cursor.execute("SELECT student_identifier FROM Responses WHERE class_id = {} AND exam_num = {}".format(class_id_grade, exam_grade))
        verify = cursor.fetchall()
        print(verify)
        # NOTE(review): `verify == verify` is always true — looks like a
        # placeholder for a real validation check; confirm intent.
        if verify == verify:
            count = 0
            for student_id, grade in grades_dict.items():
                print(student_id, grade)
                cursor.execute(
                    "SELECT response_id FROM Responses WHERE student_identifier = {} AND class_id = {} AND exam_num = {}".format(
                        student_id, class_id_grade, exam_grade))
                response_id_tmp = cursor.fetchall()
                # Only update students that actually have a response row.
                if response_id_tmp != []:
                    print(response_id_tmp)
                    cursor.execute(
                        "UPDATE Responses SET grade = {} WHERE response_id = {}".format(grade, response_id_tmp[0][0]))
                    count = count + 1
            conn.commit()
            flash("{} rows affected".format(count), "cat7")
            conn.close()
            cursor.close()
    return render_template('add.html', add_class_form=add_class_form, add_exam_form=add_exam_form, heading="Edit", add_grades_form=add_grades_form)
def article(year, month, day, slug):
    """Render a single blog post with its comment thread and nav links."""
    with db.db_conn(config.DATABASE_NAME) as conn:
        posts = get_article(conn, year, month, day, slug)
        entry_id = posts[0][4]
        published_at = posts[0][2]
        thread = conn.execute_fetch(
            'select commenter, comment, tstamp from comments where entryid=%s order by tstamp',
            (entry_id,))
        newer, older = get_next_prv(conn, published_at)
        return template('view/blog', articles=posts, comments=thread,
                        comment_counts={}, single_post=True,
                        next=newer, previous=older)
def auto_relink_check():
    """For each candidate specimen, record its current ("old") iDigBio record
    and a fresh catalog-number+genus search result ("new") to
    relink_success.csv.

    Fixes:
    - ``print specimen_id`` was Python 2 statement syntax — a SyntaxError on
      Python 3 — now a ``print()`` call;
    - ``DataFrame.append`` (removed in pandas 2.0) replaced with ``concat``.
    """
    relink_success = pandas.DataFrame(columns=[
        'specimen_id', 'MS_institution_code', 'MS_collection_code',
        'MS_catalog_number', 'MS_genus', 'MS_species',
        'Old_iDB_institution_code', 'Old_iDB_collection_code',
        'Old_iDB_catalog_number', 'Old_iDB_genus', 'Old_iDB_species',
        'New_iDB_institution_code', 'New_iDB_collection_code',
        'New_iDB_catalog_number', 'New_iDB_genus', 'New_iDB_species',
        'Old_iDB_uuid', 'New_iDB_uuid'
    ])
    csv_df = pandas.read_csv('auto_candidates.csv')
    for specimen_id in csv_df['auto']:
        conn = db.db_conn()
        c = conn.cursor()
        sql = """
            SELECT * FROM `ms_specimens` AS s
            LEFT JOIN ms_specimens_x_taxonomy AS x ON x.specimen_id = s.specimen_id
            LEFT JOIN ms_taxonomy_names AS n ON n.alt_id = x.alt_id
            WHERE s.specimen_id = %s
        """
        r = db.db_execute(c, sql, specimen_id)
        new_row = {}
        for s in r:
            # MS Values
            new_row['specimen_id'] = get_db_field(s, 'specimen_id')
            new_row['MS_institution_code'] = get_db_field(s, 'institution_code')
            new_row['MS_collection_code'] = get_db_field(s, 'collection_code')
            new_row['MS_catalog_number'] = get_db_field(s, 'catalog_number')
            new_row['MS_genus'] = get_db_field(s, 'genus')
            new_row['MS_species'] = get_db_field(s, 'species')
            # iDB Values
            if get_db_field(s, 'uuid'):
                print(specimen_id)  # was py2 `print specimen_id`
                old_idb = requests.get(
                    'https://search.idigbio.org/v2/view/records/' +
                    get_db_field(s, 'uuid')).json()
                new_row['Old_iDB_institution_code'] = get_json_field(
                    old_idb, ['indexTerms', 'institutioncode'])
                new_row['Old_iDB_collection_code'] = get_json_field(
                    old_idb, ['indexTerms', 'collectioncode'])
                new_row['Old_iDB_catalog_number'] = get_json_field(
                    old_idb, ['indexTerms', 'catalognumber'])
                new_row['Old_iDB_genus'] = get_json_field(
                    old_idb, ['indexTerms', 'genus'])
                new_row['Old_iDB_species'] = get_json_field(
                    old_idb, ['indexTerms', 'specificepithet'])
                new_row['Old_iDB_uuid'] = get_json_field(
                    old_idb, ['indexTerms', 'uuid'])
                if new_row['MS_catalog_number'] and new_row['MS_genus']:
                    # Search iDigBio for the correct record by catalog
                    # number + genus; only a unique hit is trusted.
                    new_idb = requests.get(
                        'https://search.idigbio.org/v2/search/records/?rq={"catalognumber": "'
                        + get_db_field(s, 'catalog_number') + '", "genus": "'
                        + get_db_field(s, 'genus') + '"}').json()
                    if int(new_idb['itemCount']) == 1:
                        item = new_idb['items'][0]
                        new_row['New_iDB_institution_code'] = get_json_field(
                            item, ['indexTerms', 'institutioncode'])
                        new_row['New_iDB_collection_code'] = get_json_field(
                            item, ['indexTerms', 'collectioncode'])
                        new_row['New_iDB_catalog_number'] = get_json_field(
                            item, ['indexTerms', 'catalognumber'])
                        new_row['New_iDB_genus'] = get_json_field(
                            item, ['indexTerms', 'genus'])
                        new_row['New_iDB_species'] = get_json_field(
                            item, ['indexTerms', 'specificepithet'])
                        new_row['New_iDB_uuid'] = get_json_field(
                            item, ['indexTerms', 'uuid'])
                    else:
                        warnings.warn('WARNING: ' + str(new_idb['itemCount']) +
                                      ' items found for specimen ' +
                                      str(specimen_id))
                else:
                    raise ValueError('No catalog number and genus for specimen ' +
                                     str(specimen_id))
        # DataFrame.append was removed in pandas 2.0; use concat instead.
        relink_success = pandas.concat(
            [relink_success, pandas.DataFrame([new_row])], ignore_index=True)
    relink_success.to_csv('relink_success.csv', index=False, index_label=False)
def test1(class_num):
    # Grouped bar chart: per-exam response counts of each study strategy for
    # one class.  Saves a PNG under static/graphs and returns the relative
    # 'graphs/...' filename used by the template.
    conn = db.db_conn()
    cursor = conn.cursor()
    # NOTE(review): class_num is interpolated with str.format throughout —
    # SQL injection risk; should be bound parameters.
    cursor.execute(
        "SELECT count(exam_id) from Exam WHERE class_id = {}".format(
            class_num))
    exam_count = cursor.fetchall()[0][0]
    exam_list = []
    # One query per (exam, strategy) pair; strategy columns are named `0`..`12`.
    for i in range(exam_count):
        responses_list = []
        for j in range(13):
            cursor.execute(
                'SELECT count(`{}`) from methods_used where `{}` = 1 and class_id = {} and exam_id = {}'
                .format(j, j, class_num, i + 1))
            result = cursor.fetchall()
            responses_list.append(result[0][0])
        exam_list.append(responses_list)
    strats = get_strats()[0]
    # Wrap long strategy names so x-axis labels stay readable.
    strats = [textwrap.fill(text, 15) for text in strats]
    index = np.arange(len(strats))
    width = 0.27  # bar width; each exam's bars are offset by multiples of it
    fig = plt.figure()
    ax = fig.add_subplot(111)
    rects_list = []
    # Draw one bar group per exam (exam 1 red, exam 2 green, later blue).
    for i in range(len(exam_list)):
        if i == 0:
            rects = ax.bar(index, exam_list[i], width, color='red', label='Exam {}'.format(i + 1))
            rects_list.append(rects)
            autolabel(rects, ax)
            print(exam_list[i])
        elif i == 1:
            rects = ax.bar(index + width, exam_list[i], width, color='green', label='Exam {}'.format(i + 1))
            rects_list.append(rects)
            autolabel(rects, ax)
            print(exam_list[i])
        else:
            rects = ax.bar(index + width * i, exam_list[i], width, color='blue', label='Exam {}'.format(i + 1))
            rects_list.append(rects)
            autolabel(rects, ax)
            print(exam_list[i])
    cursor.execute(
        "SELECT CRN, class_name, class_num FROM Class Where class_id = {}".
        format(class_num))
    class_title = cursor.fetchall()
    CRN = class_title[0][0]
    class_name = class_title[0][1]
    # NOTE: the parameter is rebound here to the display class number.
    class_num = class_title[0][2]
    conn.close()
    cursor.close()
    ax.set_title('By Class for {} {} {}'.format(CRN, class_name, class_num))
    ax.set_ylabel('Responses')
    ax.set_xlabel('Strategies')
    ax.set_xticks(index + width)
    ax.set_xticklabels(strats)
    ax.legend()
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.20, top=0.95)
    figure = plt.gcf()
    figure.set_size_inches(20, 8)
    # Returned path is relative to static/; savefig writes the actual file.
    filename = 'graphs/class {} {} {}.png'.format(CRN, class_name, class_num)
    figure.savefig('.//static//graphs//class {} {} {}'.format(
        CRN, class_name, class_num), dpi=150)
    figure.clf()
    return filename
def comment(year, month, day, slug):
    # Handle a comment POST for an article, then redirect back to the page.
    with db.db_conn(config.DATABASE_NAME) as conn:
        articles = get_article(conn, year, month, day, slug)
        # 'jstest' acts as a bot filter — presumably set by client-side JS so
        # non-JS spam bots fail the check; confirm against the form template.
        if request.forms['jstest'] == 'valid':
            # articles[0][4] is the entry id the comment attaches to.
            conn.execute(
                "insert into comments values (default, %s, %s, %s, now() at time zone 'US/Pacific')",
                (articles[0][4], request.forms['commenter'].strip(),
                 request.forms['comment'].strip()))
    redirect(request.path)
def workouttypes():
    """List every workout type on the workout-types page."""
    with db_conn() as conn:
        all_types = workouts.get_workout_types(conn)
        return render_template('workout_types.html',
                               title='Workout Types',
                               workout_types=all_types)
# Smoke-test script: open a DB connection using the settings in db_config,
# count the rows of the `positions` table, and close the connection.
from db import db_conn
from db_config import db_config as df

conn = db_conn(df['host'], df['port'], df['user'], df['password'], df['database'], df['charset'])
links = conn.db_select('select * from positions')
print(len(links))
conn.db_close()
def update_specimen_taxon():
    # For each specimen in specimens_to_be_renamed.csv, compare its MS genus
    # against the authoritative iDigBio record; when they differ, re-link the
    # specimen to an existing MS taxon or create a brand-new one.  Progress
    # is appended to update_taxon_success_summary.csv every iteration.
    success_csv = pandas.DataFrame(
        columns=['specimen_id', 'success', 'new_taxon'])
    specimen_csv = pandas.read_csv('specimens_to_be_renamed.csv')
    for index, row in specimen_csv.iterrows():
        # Re-read the summary each pass so progress survives a crash/restart.
        success_csv = pandas.read_csv('update_taxon_success_summary.csv')
        specimen_id = str(row['specimen_id'])
        new_row = {}
        new_taxon = 0
        print(specimen_id)
        # Get MS specimen record
        conn = db.db_conn()
        c = conn.cursor()
        sql = """
            SELECT * FROM `ms_specimens` AS s
            JOIN `ms_specimens_x_taxonomy` AS sxt ON sxt.specimen_id = s.specimen_id
            JOIN `ms_taxonomy_names` AS n ON n.alt_id = sxt.alt_id
            WHERE s.specimen_id = %s
        """
        spec_rec = db.db_execute(c, sql, specimen_id)
        spec_rec = spec_rec[0]
        # Get iDB specimen record for new UUID
        idb = requests.get('https://search.idigbio.org/v2/view/records/' +
                           spec_rec['uuid']).json()
        item = idb
        # ------------------TAXONOMY RECORD-------------------------
        # Do taxa differ?
        if get_json_field(item, ['indexTerms', 'genus'
                                 ]).lower() != spec_rec['genus'].lower():
            print('Current taxon and real taxon differ')
            print(get_json_field(item, ['indexTerms', 'genus']).lower())
            print(spec_rec['genus'].lower())
            # Taxa differ. Is there a MS taxon for the 'new' taxon?
            conn = db.db_conn()
            c = conn.cursor()
            sql = """
                SELECT * FROM `ms_taxonomy` AS t
                JOIN `ms_taxonomy_names` AS n ON n.taxon_id = t.taxon_id
                WHERE n.genus = %s AND n.species = %s
            """
            r = db.db_execute(c, sql, [
                get_json_field(item, ['indexTerms', 'genus']),
                get_json_field(item, ['indexTerms', 'specificepithet'])
            ])
            if len(r) == 0:
                print('Need to generate new taxon')
                # Need to generate an all new taxon
                '''
                Steps:
                1) Delete the sxt link to the current taxon
                2) Create a new ms_taxonomy record for this taxon
                3) Create a new ms_taxonomy_names record for this taxon, using iDB
                4) Create a new sxt link from specimen to new taxon
                '''
                # Get old taxon_id
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    SELECT * FROM `ms_specimens_x_taxonomy` WHERE specimen_id = %s
                """
                old_sxt = db.db_execute(c, sql, int(specimen_id))
                if len(old_sxt) == 1:
                    old_taxon_id = old_sxt[0]['taxon_id']
                    old_link_id = old_sxt[0]['link_id']
                else:
                    raise ValueError('More than one sxt for specimen_id ' +
                                     str(specimen_id))
                # delete old link
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    DELETE FROM `ms_specimens_x_taxonomy` WHERE link_id = %s
                """
                del_res = db.db_execute(c, sql, int(old_link_id))
                # create new ms_taxonomy_record
                taxon_vals = [
                    int(spec_rec['project_id']),
                    int(spec_rec['user_id']),
                    int(time.time()),
                    int(time.time())
                ]
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    INSERT INTO `ms_taxonomy` (project_id, user_id, created_on, last_modified_on)
                    VALUES (%s, %s, %s, %s)
                """
                tax_res = db.db_execute(c, sql, taxon_vals)
                # create new ms_taxonomy_names
                # lastrowid is the auto-increment id of the taxonomy insert.
                new_taxon_id = c.lastrowid
                tn_vals = [
                    int(spec_rec['project_id']),
                    int(new_taxon_id),
                    int(spec_rec['user_id']),
                    get_json_field(item, ['indexTerms', 'specificepithet']),
                    get_json_field(item, ['indexTerms', 'kingdom']),
                    get_json_field(item, ['indexTerms', 'phylum']),
                    get_json_field(item, ['indexTerms', 'class']),
                    get_json_field(item, ['indexTerms', 'order']),
                    get_json_field(item, ['indexTerms', 'family']),
                    int(time.time()),
                    int(time.time()),
                    get_json_field(item, ['indexTerms', 'genus']).capitalize()
                ]
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    INSERT INTO `ms_taxonomy_names` (project_id, taxon_id, user_id, species, ht_kingdom, ht_phylum, ht_class, ht_order, ht_family, created_on, last_modified_on, genus, is_primary)
                    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 1)
                """
                tn_res = db.db_execute(c, sql, tn_vals)
                # create new sxt
                new_alt_id = c.lastrowid
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    INSERT INTO `ms_specimens_x_taxonomy` (specimen_id, taxon_id, alt_id, user_id)
                    VALUES (%s, %s, %s, %s)
                """
                ins_res = db.db_execute(c, sql, [
                    int(specimen_id),
                    int(new_taxon_id),
                    int(new_alt_id),
                    int(spec_rec['user_id'])
                ])
                success = 1
                new_taxon = 1
            elif len(r) == 1:
                print('Matching specimen to currently existing taxon record')
                # Need to associate this specimen with currently existing new taxon
                '''
                Steps:
                1) Get old taxon id
                2) Get new taxon id
                3) Check for previously existing link between specimen to new_taxon_id
                4) delete old link
                5) create new link
                '''
                # Get old taxon_id
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    SELECT * FROM `ms_specimens_x_taxonomy` WHERE specimen_id = %s
                """
                old_sxt = db.db_execute(c, sql, [specimen_id])
                if len(old_sxt) == 1:
                    old_taxon_id = int(old_sxt[0]['taxon_id'])
                    old_link_id = int(old_sxt[0]['link_id'])
                else:
                    raise ValueError('More than one sxt for specimen_id ' +
                                     specimen_id)
                # Get new taxon_id
                new_taxon_id = int(r[0]['taxon_id'])
                new_alt_id = int(r[0]['alt_id'])
                # Is there already an sxt for the new_taxon_id?
                conn = db.db_conn()
                c = conn.cursor()
                sql = """
                    SELECT * FROM `ms_specimens_x_taxonomy` WHERE specimen_id = %s AND taxon_id = %s
                """
                new_sxt = db.db_execute(c, sql, [int(specimen_id), new_taxon_id])
                if len(new_sxt) > 0:
                    raise ValueError(
                        'Already existing link between taxon id ' +
                        str(new_taxon_id) + ' and specimen_id ' +
                        str(specimen_id))
                else:
                    #delete old sxt
                    conn = db.db_conn()
                    c = conn.cursor()
                    sql = """
                        DELETE FROM `ms_specimens_x_taxonomy` WHERE link_id = %s
                    """
                    del_res = db.db_execute(c, sql, old_link_id)
                    #create new sxt
                    conn = db.db_conn()
                    c = conn.cursor()
                    sql = """
                        INSERT INTO `ms_specimens_x_taxonomy` (specimen_id, taxon_id, alt_id, user_id)
                        VALUES (%s, %s, %s, %s)
                    """
                    ins_res = db.db_execute(c, sql, [
                        int(specimen_id), new_taxon_id, new_alt_id,
                        int(spec_rec['user_id'])
                    ])
                    success = 1
                    new_taxon = 1
            else:
                raise ValueError(
                    str(len(r)) + ' MS taxon records for specimen uuid ' +
                    str(row['New_iDB_uuid']))
        else:
            success = 0
            warnings.warn('Genera do not differ for specimen ' +
                          str(specimen_id) + ', skipping')
        new_specimen_id = int(specimen_id)
        new_row['success'] = success
        new_row['new_taxon'] = new_taxon
        # NOTE(review): new_row never gets a 'specimen_id' key (only the
        # unused new_specimen_id local), so that summary column stays empty
        # — confirm whether this is intended.
        success_csv = success_csv.append(new_row, ignore_index=True)
        success_csv.to_csv('update_taxon_success_summary.csv', index=False, index_label=False)
def get(self):
    """Handle a GET request: build an HTSQL query from the parsed request
    arguments, execute it together with a matching count query, and return
    the formatted, paginated result set.

    Returns whatever ``self.format`` produces for the fetched rows, the
    page metadata dict, and the resolved year; returns an empty list when
    a full-text search ('q') matches nothing.
    """
    speedlogger = logging.getLogger('speed')
    overall_start_time = time.time()
    speedlogger.info('--------------------------------------------------')
    args = self.parser.parse_args(strict=True)
    # HTSQL filter fragments accumulated from the request arguments.
    elements = []
    page_num = 1
    show_fields = copy.copy(self.default_fields)
    field_list = self.find_fields(args)
    # queries need year to link the data
    year = args.get('year', default_year())
    for arg in args:
        if args[arg]:
            if arg == 'q':
                # Full-text search: run the SQL full-text query first, then
                # constrain the HTSQL query to the surrogate keys it returns.
                qry = self.fulltext_qry.format(
                    name_stem=self.table_name_stem)
                qry = sa.sql.text(qry)
                speedlogger.info('\nfulltext query: \n%s' % qry)
                start_time = time.time()
                # Join the search words with ' & ' to AND them together.
                findme = ' & '.join(args['q'].split())
                fts_result = db_conn().execute(qry, findme=findme).fetchall()
                speedlogger.info('fulltext query time: %f'
                                 % (time.time() - start_time))
                if not fts_result:
                    return []
                elements.append(
                    "%s_sk={%s}"
                    % (self.table_name_stem,
                       ",".join(str(id[0]) for id in fts_result)))
            elif arg == 'page':
                page_num = args[arg]
            elif arg == 'per_page':
                # NOTE(review): per_page is only ever bound here; if the
                # parser can omit 'per_page', the pagination math below
                # raises NameError -- confirm the parser supplies a default.
                per_page = args[arg]
            elif arg == 'fields':
                # going through the different kinds of mappings and fields
                for maps, field_name in self.maps_fields:
                    show_fields[field_name] = ''
                    # looking at each field the user requested
                    for field in field_list:
                        # for each mapping, see if there is a field match.
                        # If so, add it to the field list
                        for m in maps:
                            if m[0] == field:
                                show_fields[field_name] += m[1] + ','
            else:
                if arg in self.field_name_map:
                    # Upper-case and re-quote the comma-separated values so
                    # they slot into the template as an HTSQL value list.
                    element = self.field_name_map[arg].substitute(
                        arg=args[arg].upper().replace(',', "','"))
                    elements.append(element)
    qry = self.query_text(show_fields)
    if elements:
        qry += "?" + "&".join(elements)
        count_qry = "/count(%s?%s)" % (self.viewable_table_name,
                                       "&".join(elements))
        # Committee endpoint is not year sensitive yet, so we don't want
        # to limit it yet. Otherwise, the candidate's won't show if they
        # are not in the default year.
        if year != '*' and (str(self.endpoint) == 'candidateresource'
                            or str(self.endpoint) == 'candidatesearch'):
            qry = qry.replace(
                'dimcandoffice',
                '(dimcandoffice?cand_election_yr={%s})' % year)
            count_qry = count_qry.replace(
                'dimcandoffice',
                '(dimcandoffice?cand_election_yr={%s})' % year)
    else:
        count_qry = "/count(%s)" % self.viewable_table_name
    offset = per_page * (page_num-1)
    qry = "/(%s).limit(%d,%d)" % (qry, per_page, offset)
    print("\n%s\n" % (qry))
    speedlogger.info('\n\nHTSQL query: \n%s' % qry)
    start_time = time.time()
    data = htsql_conn().produce(qry)
    speedlogger.info('HTSQL query time: %f' % (time.time() - start_time))
    count = htsql_conn().produce(count_qry)
    data_dict = as_dicts(data)
    # page info
    data_count = int(count[0])
    # NOTE(review): under Python 3 this is float division, so 'pages' can be
    # fractional; this looks written for Python 2 integer division -- confirm
    # the target interpreter.
    pages = data_count/per_page
    if data_count % per_page != 0:
        pages += 1
    if data_count < per_page:
        per_page = data_count
    page_data = {'per_page': per_page, 'page': page_num,
                 'pages': pages, 'count': data_count}
    speedlogger.info('\noverall time: %f' % (time.time() - overall_start_time))
    return self.format(data_dict, page_data, year)
def test2(semester):
    """Build a per-semester grouped bar chart of study-strategy responses.

    For every class and exam found in *semester*, counts how many responses
    used each of the 13 strategy columns, sums the counts per class, renders
    one bar group per class, and saves the figure under static//graphs.

    :param semester: semester identifier used to filter the Exam table.
    :return: relative ``graphs/...`` filename of the saved figure.
    """
    conn = db.db_conn()
    cursor = conn.cursor()
    # Bind the semester value instead of string-formatting it into the SQL
    # (it originates from user input).
    cursor.execute("SELECT DISTINCT class_id FROM Exam WHERE semester = %s",
                   (semester,))
    class_id_list = cursor.fetchall()
    cursor.execute("SELECT DISTINCT exam_num FROM Exam WHERE semester = %s",
                   (semester,))
    exam_num_list = cursor.fetchall()

    # class_list[c][e][k] = number of responses in class c / exam e that
    # used strategy k (columns 0..12 of methods_used).
    class_list = []
    for (class_id,) in class_id_list:
        exam_list = []
        for (exam_num,) in exam_num_list:
            response_list = []
            for k in range(13):
                # NOTE(review): the bare '.methods_used' table reference is
                # kept verbatim from the original -- confirm it is not a typo
                # for 'methods_used'. Column names cannot be bound as
                # parameters, but k is a trusted loop index; the id values
                # are bound.
                cursor.execute(
                    'SELECT count(`{}`) from .methods_used where `{}` = 1 '
                    'and class_id = %s and exam_id = %s'.format(k, k),
                    (class_id, exam_num))
                result = cursor.fetchall()
                response_list.append(result[0][0])
            exam_list.append(response_list)
        class_list.append(exam_list)

    # Collapse the per-exam rows into one 13-element total per class.
    sums = [[sum(x) for x in zip(*exams)] for exams in class_list]
    print(sums)
    # Close the cursor before its connection; the original closed the
    # connection first, which some drivers reject.
    cursor.close()
    conn.close()

    strats = get_strats()[0]
    strats = [textwrap.fill(text, 15) for text in strats]
    index = np.arange(len(strats))
    width = 0.27
    fig = plt.figure()
    ax = fig.add_subplot(111)
    rects_list = []
    # One bar group per class, offset horizontally by the bar width.
    # (Classes beyond the second all get blue, as in the original.)
    colors = {0: 'red', 1: 'green'}
    for i, class_sums in enumerate(sums):
        rects = ax.bar(index + width * i, class_sums, width,
                       color=colors.get(i, 'blue'),
                       label='Class {}'.format(i + 1))
        rects_list.append(rects)
        autolabel(rects, ax)
        print(class_sums)
    ax.set_title('By Semester for {}'.format(semester))
    ax.set_ylabel('Responses')
    ax.set_xlabel('Strategies')
    ax.set_xticks(index + width)
    ax.set_xticklabels(strats)
    ax.legend()
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.20, top=0.95)
    figure = plt.gcf()
    figure.set_size_inches(20, 8)
    filename = 'graphs/semester {}.png'.format(semester)
    figure.savefig('.//static//graphs//semester {}.png'.format(semester),
                   dpi=150)
    figure.clf()
    return filename
import sys
import os

sys.path.append(os.path.abspath(os.path.join('..', '..', 'lib')))

import db
import requests

# Backfill the `recordset` column of ms_specimens from the iDigBio record
# view API, keyed on each specimen's uuid.
conn = db.db_conn()
c = conn.cursor()
sql = """ SELECT * FROM `ms_specimens` """
r = db.db_execute(c, sql)
for s in r:
    recordset = None
    if s['uuid']:
        resp = requests.get('https://search.idigbio.org/v2/view/records/'
                            + s['uuid'])
        # Don't shadow the stdlib `json` module name with the response body.
        payload = resp.json()
        recordset = payload['indexTerms']['recordset']
        print(recordset)
        # Bind the values instead of concatenating them into the SQL: the
        # recordset string comes from an external service and previously
        # broke (or injected into) the statement if it contained quotes.
        sql = ('UPDATE `ms_specimens` SET `recordset` = %s '
               'WHERE `specimen_id` = %s')
        print(sql)
        db.db_execute(c, sql, [recordset, int(s['specimen_id'])])
def test(exam, class_num, color_passive, color_active):
    """Plot the average exam grade per study strategy for one class/exam.

    For each of the 13 strategy columns in methods_used, computes the mean
    grade of responses that used it, sorts ascending, drops the leading zero
    averages, and saves a bar chart colored by strategy type.

    :param exam: exam number to filter Responses on.
    :param class_num: class_id to filter Responses on (re-bound below to the
        class's display number read from the Class table, as in the original).
    :param color_passive: bar color for passive ('P') strategies.
    :param color_active: bar color for every other strategy type.
    :return: relative ``graphs/...`` filename of the saved figure.
    """
    conn = db.db_conn()
    cursor = conn.cursor()

    # Average grade per strategy column; 0 when no grades matched.
    avgs = []
    for i in range(13):
        # The column name is a trusted loop index; exam and class values are
        # bound as parameters instead of being string-formatted into the SQL.
        query = ("SELECT SUM(grade) / COUNT(grade) AS avg1 FROM Responses "
                 "INNER JOIN methods_used ON Responses.response_id = "
                 "methods_used.methods_used_id WHERE (methods_used.`{}` = 1 "
                 "AND Responses.exam_num = %s "
                 "AND Responses.class_id = %s)".format(i))
        cursor.execute(query, (exam, class_num))
        for row in cursor.fetchall():
            avgs.append(int(row[0]) if row[0] is not None else 0)
    print(avgs)

    # Strategy names and colors. The original executed this identical query
    # 13 times in a loop; once is enough -- it is expected to return the same
    # 13 non-'N' methods the columns above correspond to.
    strats = []
    color = []
    cursor.execute("SELECT method_name, type FROM Methods WHERE type <> 'N'")
    for name, mtype in cursor.fetchall():
        strats.append(name)
        color.append(color_passive if mtype == "P" else color_active)

    # Sort the three lists together by average, then drop leading zeros.
    avgs, strats, color = (list(t)
                           for t in zip(*sorted(zip(avgs, strats, color))))
    while avgs and avgs[0] == 0:
        avgs.pop(0)
        strats.pop(0)
        color.pop(0)
    print(avgs)

    cursor.execute(
        "SELECT CRN, class_name, class_num FROM Class Where class_id = %s",
        (class_num,))
    class_title = cursor.fetchall()
    CRN = class_title[0][0]
    class_name = class_title[0][1]
    class_num = class_title[0][2]

    strats = [textwrap.fill(text, 15) for text in strats]
    index = np.arange(len(strats))
    plt.bar(index, avgs, align='center', color=color)
    plt.xlabel('Study Strategies', fontsize=15)
    plt.ylabel('Averages', fontsize=15)
    plt.xticks(index, strats, fontsize=10)
    plt.title('{} {} {} Exam {}'.format(CRN, class_name, class_num, exam),
              fontsize=20)
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.20, top=0.95)
    figure = plt.gcf()
    figure.set_size_inches(20, 8)
    filename = 'graphs/norm {} {} {}.png'.format(CRN, class_name, class_num)
    figure.savefig('.//static//graphs//norm {} {} {}.png'.format(
        CRN, class_name, class_num), dpi=150)
    figure.clf()
    # Release both the cursor and (previously leaked) connection.
    cursor.close()
    conn.close()
    return filename