def test_export():
    # Earlier experiments (kept for reference) ran the exporter directly, e.g.:
    #   /impexp/bin/3DCityDB-Importer-Exporter -shell -config /share/config/config_export_kml.xml \
    #       -validate /opt/code/app/static/data/verified/muhammad.hasannudin_3dbldglod2_itb_labtek.gml
    # and called export_gml("config_export_kml.xml", <filename>) with a fixed config.
    # Now we generate the config file so the export target can change dynamically.
    filename = "yusa.tuberlin_3DBuildingSchemaLOD2_itb_geologi.gml"
    base = os.path.splitext(filename)[0]
    file_xml = '/share/config/' + base + '.xml'
    file_xml_up = '/share/config/config_export_kml_up.txt'
    file_xml_down = '/share/config/config_export_kml_down.txt'

    r = requests.get(url=server_api + 'cityobject/limit/' + str(21))
    if r is not None:
        if r.status_code == 200:
            d = r.json()
            # Stitch the config together: header template, one <id> per city object, footer.
            with open(file_xml_up) as c, open(file_xml_down) as g, open(file_xml, "w+") as f:
                f.write(c.read())
                for i in d['data']:
                    f.write(" <id>" + str(i["gmlid"]) + "</id>\n")
                f.write(g.read())
    arr = os.listdir('/share/config/')
    print(arr)
    return export_gml(base + '.xml')
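# A minimal sketch of how the header/<id>/footer stitching above could be
# factored out for reuse (the same pattern reappears in check_gml below).
# write_export_config is a hypothetical helper name, not an existing function
# in this app.
def write_export_config(file_xml, file_xml_up, file_xml_down, gml_ids):
    """Write an exporter config: header template, one <id> per object, footer."""
    with open(file_xml_up) as up, open(file_xml_down) as down, open(file_xml, "w") as out:
        out.write(up.read())
        for gml_id in gml_ids:
            out.write(" <id>" + str(gml_id) + "</id>\n")
        out.write(down.read())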
def init_db():
    # Initialize the database. (The docs say to put this function after
    # connect_db; is that actually required?)
    with app.app_context():
        # We are defining a function, not handling a request, so no application
        # context exists and g is unavailable; this statement creates one.
        db = get_db()
        with app.open_resource('schema.sql', mode='r') as f:
            # open_resource opens schema.sql relative to the app's location.
            # read() can only be consumed once, so keep the result in a variable.
            a = f.read()
            db.cursor().executescript(a)
        with app.open_resource('dbuser.sql', mode='r') as g:
            # Creates a table named "user" that stores registered-user records.
            b = g.read()
            db.cursor().executescript(b)
        db.commit()
        # But how does this differ from db.cursor().executescript(\path\schema.sql)?
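# For context: get_db above is assumed to follow the standard Flask/sqlite3
# pattern from the flaskr tutorial, caching the connection on the
# application-context object g. A sketch of that convention, not necessarily
# this app's actual implementation (`app` is the Flask instance from the
# surrounding module):
import sqlite3
from flask import g

def get_db():
    # Open a connection once per application context and reuse it.
    if not hasattr(g, 'sqlite_db'):
        g.sqlite_db = sqlite3.connect(app.config['DATABASE'])
    return g.sqlite_db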
def download_fresh_db():
    app.logger.info("downloading fresh database from: {}".format(DB_FILE_URL))
    req = requests.get(DB_FILE_URL, stream=True)
    gzip_file_location = "{}.gz".format(DB_FILE_LOCATION)
    # Stream the gzipped database to disk in 1 KiB chunks.
    with open(gzip_file_location, 'wb') as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    app.logger.info("decompressing database file...")
    # Decompress the archive into the final database location.
    with open(DB_FILE_LOCATION, 'wb') as f:
        with gzip.open(gzip_file_location, 'rb') as g:
            f.write(g.read())
def download_fresh_db():
    application.logger.info("downloading fresh database from: {}".format(DB_FILE_URL))
    req = requests.get(DB_FILE_URL, stream=True)
    gzip_file_location = "{}.gz".format(DB_FILE_LOCATION)
    # Stream the gzipped database to disk in 1 KiB chunks.
    with open(gzip_file_location, 'wb') as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    application.logger.info("decompressing database file...")
    # Decompress the archive into the final database location.
    with open(DB_FILE_LOCATION, 'wb') as f:
        with gzip.open(gzip_file_location, 'rb') as g:
            f.write(g.read())
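# g.read() above loads the entire decompressed database into memory at once.
# A sketch of a constant-memory alternative using only the standard library
# (decompress_db is a hypothetical name for illustration):
import gzip
import shutil

def decompress_db(gzip_file_location, db_file_location):
    # Stream-decompress the archive straight into the target file.
    with gzip.open(gzip_file_location, 'rb') as src, open(db_file_location, 'wb') as dst:
        shutil.copyfileobj(src, dst)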
def cohortCoordinatedQuery():
    print('INCOHORTCOORDINATED')
    requestQueryString = request.query_string.decode('UTF-8')
    print(requestQueryString)
    ccc = request.args.get('ccc', '0')
    party = request.args.get('party', '1')
    spdzHost = request.args.get('host', '1')

    # First step: run our own database query for the cohort count.
    # Parameterized to avoid SQL injection from the ccc query argument.
    conn = sqlite3.connect(app.config['DATABASE'])
    c = conn.cursor()
    query = "select candidatecount from candidates where study = ?;"
    print('Query=' + query)
    c.execute(query, (ccc,))
    data = c.fetchone()
    if data is None:
        localCount = 222222  # sentinel: no matching study
    else:
        localCount = int(data[0])
    localCountResultString = str(localCount)

    # Read the peer and local server addresses (not currently used here).
    with open('/home/ec2-user/others') as f:
        others = f.read().splitlines()
    with open('/home/ec2-user/me') as g:
        me = g.read().splitlines()

    # Second: start the SPDZ client with our count. It will (eventually, after
    # the next few steps run) return the total SMC-computed count.
    playercmd = ('nohup /home/ec2-user/impact-bin/runSMC.sh ' + localCountResultString
                 + ' ' + spdzHost.strip() + ' ' + party + ' >&/dev/null &')
    print("playercmd = " + playercmd)
    CallerResult = subprocess.run(playercmd, shell=True, stdout=subprocess.PIPE)
    print('coord query ran the playercmd OK.')

    # Third: return a success code. SPDZ/2 keeps working in the background, so
    # we don't yet know whether the computation will succeed.
    return 'SUCCESS\n'
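# The local-count lookup above is duplicated in cohortQuery below. A sketch of
# how it could be shared; fetch_local_count is a hypothetical name, and 222222
# is the sentinel value the handlers use when no study matches.
import sqlite3

def fetch_local_count(db_path, study):
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        c.execute("select candidatecount from candidates where study = ?;", (study,))
        data = c.fetchone()
        return 222222 if data is None else int(data[0])
    finally:
        conn.close()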
def check_gml():
    id = request.args.get('id')
    if 'username' in session:
        if session['type_id'] == 3:
            form = VerifyGmlForm()
            if request.method == 'POST':
                if form.validate_on_submit():
                    # Send the review to the database.
                    if int(form.respons.data) + 1 == 3:
                        # Approved: move the CityGML file to the verified folder.
                        print("moved")
                        filename = form.filename.data
                        if os.path.exists(os.path.join(UPLOAD_FOLDER, filename)):
                            os.rename(os.path.join(UPLOAD_FOLDER, filename),
                                      os.path.join(VERIFIED_FOLDER, filename))
                        # Check GML validity.
                        v = validate_gml(filename)
                        print(v)
                        if "is valid" in v:
                            # Trigger the import into the 3DCityDB.
                            print("imported")
                            r = import_gml(filename)
                            print(r)
                            if "ERROR" in r:
                                flash("error imported")
                            else:
                                # Parse the imported building count out of the importer log.
                                sp = r.split("bldg:Building: ", 1)
                                number = sp[1].split("\n", 1)
                                print(number[0])
                                data = {
                                    "validator": session['username'],
                                    "status_id": int(form.respons.data) + 1,
                                    "reason": form.reason.data,
                                    "id": int(id)
                                }
                                headers = {'Authorization': session['Authorization']}
                                # Send the verification result to the API.
                                r = requests.post(url=server_api + 'city_gml/verify/',
                                                  json=data, headers=headers)
                                print(r.text)
                                a = r.json()
                                if r is not None:
                                    if r.status_code == 202:
                                        gml_id = form.city_gml_id.data
                                        r = requests.get(url=server_api + 'cityobject/last/' + number[0])
                                        d = r.json()
                                        if r is not None:
                                            if r.status_code == 200:
                                                list_gml_id = []
                                                base = os.path.splitext(filename)[0]
                                                file_xml = '/share/config/' + base + '.xml'
                                                file_xml_up = '/share/config/config_export_kml_up.txt'
                                                file_xml_down = '/share/config/config_export_kml_down.txt'
                                                # Stitch the export config together:
                                                # header template, one <id> per object, footer.
                                                with open(file_xml_up) as c, \
                                                        open(file_xml_down) as g, \
                                                        open(file_xml, "w+") as f:
                                                    f.write(c.read())
                                                    for i in d['data']:
                                                        list_gml_id.append(str(i["gmlid"]))
                                                        f.write(" <id>" + str(i["gmlid"]) + "</id>\n")
                                                    f.write(g.read())
                                                zz = export_gml(base + '.xml')
                                                print(zz)
                                                if "ERROR" in zz:
                                                    flash("error generating kml")
                                                else:
                                                    print("hola")
                                                    data = {
                                                        "list_gml_id": list_gml_id,
                                                        "city_gml_id": int(gml_id)
                                                    }
                                                    headers = {'Authorization': session['Authorization']}
                                                    r = requests.post(url=server_api + 'buildings/',
                                                                      json=data, headers=headers)
                                                    e = r.json()
                                                    if r is not None:
                                                        if r.status_code == 201:
                                                            # TODO: insert into monitoring?
                                                            # m = requests.get(url=server_api + '/monitoring/attribute/last/' + sp[1])
                                                            print(e)
                                                            flash('Your review has been saved successfully')
                                                            return redirect(url_for('home.check_gml') + "?id=" + id)
                                    else:
                                        flash("Status: " + a["status"] + ' : ' + a["message"])
                                        return redirect(url_for('home.check_gml') + "?id=" + id)
                                else:
                                    flash('Can not connect to API')
                                    return redirect(url_for('home.check_gml') + "?id=" + id)
                        else:
                            print("not valid")
                            flash("not valid")
                            return redirect(url_for('home.check_gml') + "?id=" + id)
                    else:
                        # Rejected: record the verification result only.
                        data = {
                            "validator": session['username'],
                            "status_id": int(form.respons.data) + 1,
                            "reason": form.reason.data,
                            "id": int(id)
                        }
                        headers = {'Authorization': session['Authorization']}
                        # Send the verification result to the API.
                        r = requests.post(url=server_api + 'city_gml/verify/',
                                          json=data, headers=headers)
                        print(r.text)
                        a = r.json()
                        if r is not None:
                            if r.status_code == 202:
                                flash('Your review has been saved successfully')
                                return redirect(url_for('home.check_gml') + "?id=" + id)
                            else:
                                flash("Status: " + a["status"] + ' : ' + a["message"])
                                return redirect(url_for('home.check_gml') + "?id=" + id)
                        else:
                            flash('Can not connect to API')
                            return redirect(url_for('home.check_gml') + "?id=" + id)
            # GET request (or failed validation): render the review form.
            print("GET")
            return render_template('check_gml.html', id=id, url_api=url_api, form=form)
        else:
            return redirect(url_for('home.contributor'))
    else:
        return redirect(url_for('home.login'))
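# A sketch of the importer-log parsing used in check_gml above (extracting the
# building count that follows "bldg:Building: "); parse_building_count is a
# hypothetical helper name, added here only for illustration.
def parse_building_count(import_log):
    """Return the count following 'bldg:Building: ' in the log, or None."""
    marker = "bldg:Building: "
    if marker not in import_log:
        return None
    return import_log.split(marker, 1)[1].split("\n", 1)[0].strip()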
def cohortQuery():
    print('INCOHORTQUERY')
    requestQueryString = request.query_string.decode('UTF-8')
    print(requestQueryString)
    ccc = request.args.get('ccc', '0')

    # First step: run our own database query for the cohort count.
    # Parameterized to avoid SQL injection from the ccc query argument.
    conn = sqlite3.connect(app.config['DATABASE'])
    c = conn.cursor()
    query = "select candidatecount from candidates where study = ?;"
    print('Query=' + query)
    c.execute(query, (ccc,))
    data = c.fetchone()
    if data is None:
        localCount = 222222  # sentinel: no matching study
    else:
        localCount = int(data[0])
    localCountResultString = str(localCount)

    # Second: start the Server.x process to assign SPDZ connections to clients.
    # serverxcmd = '/home/ec2-user/impact-bin/startServer.sh'
    # CallerResult = subprocess.run(serverxcmd, shell=True, stdout=subprocess.PIPE)

    # Read all other servers, and our own address.
    with open('/home/ec2-user/others') as f:
        others = f.read().splitlines()
    with open('/home/ec2-user/me') as g:
        me = g.read().splitlines()

    # Third: start the remote queries that fetch their value and use the SPDZ
    # client to do their computations. We don't care about the return values.
    qstr = []
    qstr.append('http://' + others[0].strip() + '/v2/cohortCoordinatedQuery?ccc='
                + ccc + '&host=' + me[0].strip() + '&party=1')
    qstr.append('http://' + others[1].strip() + '/v2/cohortCoordinatedQuery?ccc='
                + ccc + '&host=' + me[0].strip() + '&party=2')
    print('IT EQUALS: ' + qstr[0])
    print('IT EQUALS: ' + qstr[1])

    idx = 0
    parties = []
    for u in others:
        parties.append(u.strip().split(':')[0])
        print('PARTY ' + parties[idx])
        idx = idx + 1

    with urllib.request.urlopen(qstr[0]) as f:
        throwaway = f.read(1000)
    with urllib.request.urlopen(qstr[1]) as f:
        throwaway = f.read(1000)
    print('THROWAWAY: ' + throwaway.decode('UTF-8'))

    # Fourth: start the SPDZ client with our count. This will (eventually, after
    # the steps above complete) return the total SMC-computed count.
    clientxcmd = ('/home/ec2-user/impact-bin/runSMC.sh ' + localCountResultString
                  + ' ' + me[0].strip() + ' 0')
    print(clientxcmd)
    CallerResult = subprocess.run(clientxcmd, shell=True, stdout=subprocess.PIPE)
    print('RAN THE SUBPROCESS (runSMC.sh)')

    # Fifth: the first SPDZ client, running on the main machine, is the one
    # that returns a value. Read that and return it.
    with open('/tmp/resultTotal') as f:
        theFinalResult = f.read()
    print('theFinalResult = ' + theFinalResult)
    # end of cohortQuery
    return theFinalResult + '\n'
def cohortQuery():
    print('INCOHORTQUERY')
    requestQueryString = request.query_string.decode('UTF-8')
    print(requestQueryString)
    ccc = request.args.get('ccc', '0')

    # First step: run our own database query (or an "ICEES" query) for the
    # cohort count.
    localCountResultString = queryForMatchCount(ccc, app.config['DATABASE'])

    # Second: start the Server.x process to assign SPDZ connections to clients.
    # NOTE: SPDZ/2 now runs in peer-to-peer, serverless mode, so this code will
    # probably be removed very soon.
    # serverxcmd = '/home/ec2-user/impact-bin/startServer.sh'
    # CallerResult = subprocess.run(serverxcmd, shell=True, stdout=subprocess.PIPE)

    # Read all other servers, our own address, and the service port.
    with open('/home/ec2-user/others') as f:
        others = f.read().splitlines()
    with open('/home/ec2-user/me') as g:
        me = g.read().splitlines()
    with open('/home/ec2-user/port') as h:
        portNum = h.readline().strip()

    # Third: start the remote queries that fetch their value and use the SPDZ
    # client to do their computations. We don't care about the return values.
    qstr = []
    qstr.append('http://' + others[0].strip() + ":" + str(portNum)
                + '/v2/cohortCoordinatedQuery?ccc=' + ccc + '&host=' + me[0].strip() + '&party=1')
    qstr.append('http://' + others[1].strip() + ":" + str(portNum)
                + '/v2/cohortCoordinatedQuery?ccc=' + ccc + '&host=' + me[0].strip() + '&party=2')
    print('IT EQUALS: ' + qstr[0])
    print('IT EQUALS: ' + qstr[1])

    # (A left-over debugging loop over `others` used to live here. There isn't
    # a good way to debug Flask code standalone, e.g. in spyder: there is too
    # much timing dependency and supporting cruft, such as Linux process state.)

    # It's OK to issue these two requests sequentially because the server's
    # cohortCoordinatedQuery always returns immediately; the SPDZ/2 engine is
    # the only asynchronous component.
    with urllib.request.urlopen(qstr[0]) as f:
        throwaway = f.read(1000)
    with urllib.request.urlopen(qstr[1]) as f:
        throwaway = f.read(1000)
    print('THROWAWAY: ' + throwaway.decode('UTF-8'))

    # Fourth: start the SPDZ client with our count. This will (eventually)
    # return the total SMC-computed count.
    clientxcmd = ('/home/ec2-user/impact-bin/runSMC.sh ' + localCountResultString
                  + ' ' + me[0].strip() + ' 0')
    print(clientxcmd)
    CallerResult = subprocess.run(clientxcmd, shell=True, stdout=subprocess.PIPE)
    print('RAN THE SUBPROCESS (runSMC.sh)')

    # Fifth: the first SPDZ client, running on the main machine, is the one
    # that returns a value. Read that and return it.
    with open('/tmp/resultTotal') as f:
        theFinalResult = f.read()
    print('theFinalResult = ' + theFinalResult)
    # end of cohortQuery
    return theFinalResult + '\n'
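# queryForMatchCount is called above but not defined in this section. Assuming
# it wraps the same sqlite lookup the earlier version of cohortQuery performed
# inline (same table, same 222222 sentinel), a sketch might look like this:
import sqlite3

def queryForMatchCount(ccc, db_path):
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        c.execute("select candidatecount from candidates where study = ?;", (ccc,))
        data = c.fetchone()
        # Return a string, since the caller splices it into a shell command.
        return str(222222 if data is None else int(data[0]))
    finally:
        conn.close()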
APP = Flask(__name__)
APP.secret_key = urandom(32)
APP.config.from_mapping(DATABASE="flaskr/database.db")
APP.jinja_env.globals.update(zip=zip)
APP.register_blueprint(GOOGLE)
APP.register_blueprint(REDDIT)

# Create (and truncate) the database file, then load the schema. The with
# blocks close the files themselves, so no explicit close() calls are needed.
with open("flaskr/database.db", "w+"):
    db = connect(APP.config["DATABASE"])
    with open("flaskr/schema.sql") as g:
        db.executescript(g.read())
    db.close()


@APP.before_request
def database_connection():
    """Handle automatic request-based database connection."""
    conn()


@APP.teardown_request
def close_database_connection(Exception):