def group(group_num):
    """Render the per-group query-log page.

    Redirects to the server hosting this group's logs when called on the
    wrong host, otherwise tails the local Postgres CSV log for rows
    belonging to the group and renders them newest-first.
    """
    # Each group is assigned to a specific server
    # So that the load is equally balanced
    # and we only have to read log files present locally on disk
    if my_IP() != group_IP(group_num):
        return redirect("http://%s:%d/group/%d" % (group_IP(group_num), 5001, group_num))
    # maxlen makes the deque self-trimming: once full, each append discards
    # the oldest row — same result as the manual popleft, but done in C.
    rows = deque(maxlen=MAX_ROWS)
    with open(PG_LOG, newline='') as f:
        for row in csv.reader(f):
            # Column 1 of the Postgres CSV log identifies the database/user;
            # keep only this group's entries.
            if not row[1].startswith("group_%d" % group_num):
                continue
            # Keep the timestamp (col 0) and the message text (col 13 —
            # presumably the SQL statement; confirm against log_destination=csvlog).
            rows.append([row[0], row[13]])
    # reversed() shows the most recent rows first.
    return render_template("group.html", rows=reversed(rows), group_num=group_num)
def schema(group_num):
    """Render the schema overview (tables, row counts, per-table DDL) for a group."""
    # Serve from the host that actually stores this group's database.
    if my_IP() != group_IP(group_num):
        return redirect("http://%s:%d/schema/%d" % (group_IP(group_num), 5001, group_num))
    ip = group_IP(group_num)
    db = "group_" + str(group_num)
    # Plain \dt probe first: a non-zero exit status means the DB is missing.
    status, tables = run_query(ip, db, "\\dt")
    if status:
        return "Database for group %s does not exist" % group_num
    # Approximate row counts straight from the catalog (cheap, no full scans).
    count_query = (
        "SELECT C.relname AS table, C.reltuples AS approx_count "
        "FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace) "
        "WHERE N.nspname NOT IN ('pg_catalog', 'information_schema') "
        "AND C.relkind='r' ORDER BY C.relname;"
    )
    _, counts = run_query(ip, db, count_query, show_html=True)
    # Parse table names out of the plain-text \dt listing: the first three
    # lines are headers, and the name sits in the second |-separated column.
    table_names = [
        line.split("|")[1].strip()
        for line in tables.split("\n")[3:]
        if "|" in line
    ]
    schemas = []
    for table in table_names:
        _, described = run_query(ip, db, "\\d %s" % (table), show_html=True)
        schemas.append(described)
    # Re-run \dt with HTML output for the template.
    _, tables = run_query(ip, db, "\\dt", show_html=True)
    return render_template("schema.html", tables=tables, counts=counts,
                           schemas=schemas, group_num=group_num)
def index(): user = request.authorization.username # Each group is assigned to a specific server # So that the load is equally balanced # and we only have to read log files present locally on disk if my_IP() != group_IP(user): return redirect("http://%s:%d" % (group_IP(user), 5000)) if request.method == 'GET': return render_template('base.html', group=user.split("_")[-1]) file = request.files['file'] # Keep in mind that this function is called multiple times for the same file! current_chunk = int(request.form['dzchunkindex']) student_dir = os.path.join("../uploads", user) if not os.path.exists(student_dir): os.makedirs(student_dir) file_path = os.path.join(student_dir, secure_filename(file.filename)) # If the file already exists it's ok if we are appending to it, # but not if it's new file that would overwrite the existing one if os.path.exists(student_dir) and current_chunk == 0: try: # Delete the entire student folder # Solves the problem of student uploading files with different names shutil.rmtree(student_dir) os.makedirs(student_dir) except OSError: return make_response(("Unable to delete old uploads.", 400)) # Write chunk to file try: with open(file_path, 'ab') as f: f.seek(int(request.form['dzchunkbyteoffset'])) f.write(file.stream.read()) except OSError: err = "Couldn't write to file." log.exception(err) return make_response((err, 500)) # Finished writing chunks? total_chunks = int(request.form['dztotalchunkcount']) if current_chunk + 1 == total_chunks: msg = pg_load(user, request.authorization.password, file_path) # Crude way of detecting that an error has occurred if b"ERROR:" in msg: log.error("%s - PG SQL returned: %s", user, msg.decode("ascii", "ignore")) return make_response((msg, 400)) else: log.debug("%s - Data upload successful", user) return make_response((msg, 200)) return make_response(("Chunk upload successful", 200))
def pg_load(user, pswd, dump_path):
    """Load a SQL dump into the user's group database, then lock down privileges.

    NOTE(review): a second `pg_load` definition appears later in this file
    and, if both live in the same module, shadows this one — confirm which
    is intended to be live.
    """
    ip = group_IP(user)
    log.debug("%s - Performing Cleanup before loading", user)
    conn = connect(ip, "postgres", "vpl-362")  # BUG: What if this fails
    cleanup(conn, user)
    conn.close()
    log.debug("%s - Cleanup Complete", user)
    # Load Database via psql. SECURITY NOTE(review): shell=True with
    # interpolated ip/db/dump values — presumably safe because `user` comes
    # from HTTP auth and dump_path from secure_filename(), but verify.
    cmd = 'PGPASSWORD="******" psql -h {ip} -d {db} -U "postgres" < "{dump}"'.format(pswd=pswd, ip=ip, db=user, user=user, dump=dump_path)
    log.debug("%s - Running command: %s ", user, cmd)
    msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    log.debug("%s - Database Loading Complete", user)
    # REVOKE PRIVILEGES: demote the group role to read-only on its own data.
    conn = connect(ip, "postgres", "vpl-362", dbname=user)
    log.debug("%s - Revoking Priviledges", user)
    # NOTE(review): SQL identifiers can't be bound as parameters, so `user`
    # is .format()-ed directly into the statement — relies on upstream
    # validation of the username; confirm.
    query = """
    REVOKE CREATE ON SCHEMA public FROM public;
    GRANT ALL ON schema public TO postgres;
    GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO {user};
    GRANT SELECT ON ALL TABLES IN SCHEMA public TO {user};
    """
    conn.cursor().execute(query.format(user=user))
    conn.commit()
    conn.close()
    return msg
def change_ownership_db_components(group):
    """Transfer ownership of every table, sequence, view and materialized
    view in the group's database to the group role.

    The four object kinds followed the same list-then-ALTER pattern as four
    copy-pasted stanzas; they are now driven by one (label, list query,
    ALTER template) table. Queries, order and printed output are unchanged.
    """
    print("\nChanging Ownerships...")
    ip = group_IP(group)
    targets = [
        ("\nTables...",
         "SELECT TABLENAME FROM PG_TABLES WHERE SCHEMANAME = 'public';",
         "ALTER TABLE \"{}\" OWNER TO {};"),
        ("\nSequences...",
         "SELECT SEQUENCE_NAME FROM INFORMATION_SCHEMA.SEQUENCES WHERE SEQUENCE_SCHEMA = 'public';",
         "ALTER SEQUENCE \"{}\" OWNER TO {};"),
        ("\nViews...",
         "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.VIEWS WHERE TABLE_SCHEMA = 'public';",
         "ALTER VIEW \"{}\" OWNER TO {};"),
        ("\nMaterialized Views...",
         "SELECT oid::regclass::text FROM pg_class WHERE relkind = 'm';",
         "ALTER MATERIALIZED VIEW \"{}\" OWNER TO {};"),
    ]
    for label, list_query, alter_template in targets:
        print(label)
        status, listing = run_query(ip, group, list_query)
        # If exist status of previous command is zero the listing succeeded;
        # otherwise skip this object kind entirely (matches original flow).
        if status:
            continue
        # run_query returns one name per whitespace-separated token.
        for name in [each.strip() for each in listing.split()]:
            query = alter_template.format(name, group)
            status, output = run_query(ip, group, query)
            print("\tQuery:{}\n\tStatus:{}\n\tOutput:{}\n\n".format(
                query, status, output))
def give_permissions(group):
    """Grant the group role full rights over its database and the public
    schema, printing the status of each statement as it runs."""
    print("\nGiving Permissions...")
    ip = group_IP(group)
    grant_templates = (
        "GRANT ALL ON ALL TABLES IN schema public TO {group};",
        "GRANT ALL ON ALL SEQUENCES IN schema public TO {group};",
        "GRANT ALL ON ALL FUNCTIONS IN schema public TO {group};",
        "ALTER SCHEMA public OWNER TO {group};",
        "ALTER DATABASE {group} OWNER TO {group};",
    )
    for template in grant_templates:
        statement = template.format(group=group)
        status, output = run_query(ip, group, statement)
        print("\tQuery:{}\n\tStatus:{}\n\tOutput:{}\n\n".format(
            statement, status, output))
def pg_load(user, pswd, dump_path):
    """Load a SQL dump into the user's group database and grant the group
    role access to the loaded objects.

    Returns psql's combined stdout/stderr as bytes so the caller can scan
    it for "ERROR:" markers.

    NOTE(review): this re-defines pg_load — an earlier definition exists in
    this file; confirm this later one is the intended live version.
    """
    ip = group_IP(user)
    log.debug("%s - Performing cleanup before loading", user)
    # TODO(review): connect() failure handling — if it returns None or
    # raises, cleanup() below will blow up; confirm and guard.
    conn = connect(ip, "postgres", "vpl-362")
    cleanup(conn, user)
    conn.close()
    log.debug("%s - Cleanup complete", user)
    # Load the dump via psql. SECURITY NOTE(review): shell=True with
    # interpolated values; `user` comes from HTTP auth and dump_path from
    # secure_filename(), but the quoting is ad hoc — verify upstream
    # validation before trusting this.
    cmd = 'PGPASSWORD="******" psql -h {ip} -d {db} -U postgres < "{dump}"'.format(
        pswd=pswd, ip=ip, db=user, user=user, dump=dump_path)
    log.debug("%s - Running command : %s", user, cmd)
    msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    log.debug("%s - Database Loading Complete", user)
    # Grant the group account full access to what the dump created.
    conn = connect(ip, "postgres", "vpl-362", dbname=user)
    log.debug("%s - Revoking Privileges", user)
    # NOTE(review): identifiers can't be bound as query parameters, so the
    # role/database name is .format()-ed straight into the SQL — relies on
    # the username being validated upstream; confirm.
    query = """
    GRANT CONNECT ON DATABASE {group} TO {user};
    GRANT ALL PRIVILEGES ON DATABASE {group} TO {user};
    GRANT USAGE ON SCHEMA public TO {user};
    GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO {user};
    """
    conn.cursor().execute(query.format(user=user, group=user))
    conn.commit()
    conn.close()
    return msg
# Fragment of a larger loop: `row` and `group` are bound by an enclosing
# `for` that is outside this view — presumably iterating rows of a roster
# spreadsheet/CSV (TODO confirm). Odd columns hold member names, even
# columns hold entry numbers.
names = [each.strip() for each in row[1::2] if each.strip()]
entry_numbers = [
    each.strip() for each in row[2::2] if each.strip()
]
if group not in CREDENTIALS:
    print("Credentials for {group} does not exist.".format(
        group=group))
# elif group == "group_0":
else:
    # Build recipient addresses from entry numbers; entry_to_kerberos and
    # MAIL_SERVER are defined elsewhere in the project — presumably
    # kerberos-id + domain suffix (TODO confirm).
    tolist = [
        entry_to_kerberos(en) + MAIL_SERVER for en in entry_numbers
    ]
    host = group_IP(group)
    portal_url = host + ":5000"
    # Fill the e-mail template with the group's login and portal details.
    body = EMAIL_TEMPLATE.format(uname=group,
                                 pswd=CREDENTIALS[group],
                                 members=", ".join(names),
                                 url=portal_url,
                                 host=host)
    msg = MIMEText(body)
    msg['Subject'] = SUBJECT
    msg['From'] = SENDER['email']
    msg['To'] = ", ".join(tolist)
    print("Sending Credentials to {}".format(msg['To']))
    # NOTE(review): only prints the message — no smtplib send visible here;
    # confirm whether actual sending happens elsewhere or is disabled.
    print(msg.as_string())
port="5432", password="******", ) return conn except Exception as e: print(e) # print("Error connecting to postgres server at %s:5432" % (ip)) return None # In[ ]: if __name__ == '__main__': for group in CREDENTIALS.keys(): conn = connect(group_IP(group)) # print("Connected successfully...") try: query = QUERY.format(group=group, user=group, pswd=CREDENTIALS[group]) # print(query) conn.autocommit = True conn.cursor().execute("DROP DATABASE IF EXISTS {};".format(group)) conn.commit() conn.cursor().execute(query) conn.commit() except: query = QUERY2.format(group=group, user=group, pswd=CREDENTIALS[group])