def temp_waterml(request, id):
    """Serve a previously generated WaterML file from the workspace 'id' folder.

    Returns an XML HttpResponse streaming the file named by `id`.
    """
    base_path = utilities.get_workspace() + "/id"
    file_path = base_path + "/" + id
    # FileWrapper streams the open file; Django closes it with the response.
    response = HttpResponse(FileWrapper(open(file_path)), content_type='application/xml')
    return response
def response(request): service_url = 'http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?WSDL' # service_url = 'http://hiscentral.cuahsi.org/webservices/hiscentral.asmx?WSDL' # # site_code = '10147100' site_code = 'ODM:010210JHI' # variable_code = 'ODM:Discharge' variable_code = 'NWISDV:00060' client = connect_wsdl_url(service_url) # print client start_date ='' end_date = '' auth_token = '' response1 = client.service.GetValues(site_code, variable_code, start_date, end_date, auth_token) print response1 response= urllib2.urlopen('http://hiscentral.cuahsi.org/webservices/hiscentral.asmx/GetWaterOneFlowServiceInfo') html = response.read() temp_dir = utilities.get_workspace() file_temp_name = temp_dir + '/id/' + 'WaterOneFlowServiceInfo' + '.xml' file_temp = open(file_temp_name, 'wb') file_temp.write(html) file_temp.close() service_url = utilities.parse_service_info(file_temp_name) # service_url = 'http://hiscentral.cuahsi.org/webservices/hiscentral.asmx?WSDL' # client = connect_wsdl_url(service_url) # print client # print response1 # response1 = {"File uploaded":"sucess"} # base_path = utilities.get_workspace()+"/hydroshare" # file_path = base_path + "/" +title # response = HttpResponse(FileWrapper(open(file_path)), content_type='application/xml') # return response1 return service_url
def save_file(request, res_id, file_name, src, save_type):
    """Write the POSTed 'script' text to the local copy of a resource file
    and push it back to HydroShare.

    save_type 'save' replaces the existing remote file (delete + add);
    anything else adds it as a new file and removes the local working tree.
    Returns a JsonResponse describing success or failure.
    """
    script = request.POST.get('script')
    file_path = utilities.get_workspace() + '/id'
    root_dir = file_path + '/' + res_id
    data_dir = root_dir + '/' + res_id + '/data/contents/' + file_name
    try:
        # Two OAuth helpers exist in this project; a flag picks the live one.
        if use_hs_client_helper:
            hs = get_oauth_hs(request)
        else:
            hs = getOAuthHS(request)
        if save_type == 'save':
            with open(data_dir, 'wb') as f:
                f.write(script)
            # Replace: remove the remote copy before re-uploading.
            hs.deleteResourceFile(res_id, file_name)
            hs.addResourceFile(res_id, data_dir)
        else:
            with open(data_dir, 'wb') as f:
                f.write(script)
            hs.addResourceFile(res_id, data_dir)
            shutil.rmtree(root_dir)
        result = {"File Uploaded": file_name}
    except Exception:
        # Best-effort endpoint: report failure to the client instead of a 500.
        result = {"File not saved": file_name}
    return JsonResponse(result)
def save_file(request, res_id, file_name, src, save_type):
    """Write the POSTed 'script' text into the local resource tree and push
    it to HydroShare via getOAuthHS.

    save_type 'save' replaces the remote file; anything else adds it and
    removes the local working tree. Returns a JsonResponse status dict.
    """
    script = request.POST.get('script')
    file_path = utilities.get_workspace() + '/id'
    root_dir = file_path + '/' + res_id
    data_dir = root_dir + '/' + res_id + '/data/contents/' + file_name
    try:
        # Hoisted: both branches constructed their own client identically.
        hs = getOAuthHS(request)
        with open(data_dir, 'wb') as f:
            f.write(script)
        if save_type == 'save':
            hs.deleteResourceFile(res_id, file_name)
            hs.addResourceFile(res_id, data_dir)
        else:
            hs.addResourceFile(res_id, data_dir)
            shutil.rmtree(root_dir)
        result = {"File Uploaded": file_name}
    except Exception:
        result = {"File not saved": file_name}
    return JsonResponse(result)
def init_hydroshare_gis_layers_db(first_time):
    """Create the app's database tables; on first run, also purge the
    default GeoServer workspace so no stale layers survive a reinstall.
    """
    Base.metadata.create_all(engine)
    if first_time:
        geoserver = get_spatial_dataset_engine(name='default')
        geoserver.delete_workspace(workspace_id=get_workspace(),
                                   purge=True, recurse=True)
def showfile(request, id):
    """Stream a zip archive from the workspace as an attachment download.

    Appends '.zip' to `id` when missing, then serves the matching file.
    """
    if not id.endswith('.zip'):
        id += '.zip'
    archive_path = os.path.join(utilities.get_workspace(), id)
    resp = HttpResponse(FileWrapper(open(archive_path)),
                        content_type='application/zip')
    resp['Content-Disposition'] = 'attachment; filename="' + id + '"'
    return resp
def delete_file(request, res_id, file_name, src):
    """Delete a file from a HydroShare resource, then remove the local copy
    of the resource tree. Returns a JsonResponse status dict.
    """
    try:
        file_path = utilities.get_workspace() + '/id'
        root_dir = file_path + '/' + res_id
        hs = getOAuthHS(request)
        hs.deleteResourceFile(res_id, file_name)
        shutil.rmtree(root_dir)
        result = {"File Deleted": file_name}
    except Exception:
        # Best-effort: report failure to the client instead of raising.
        result = {'File not Deleted': file_name}
    return JsonResponse(result)
def error_report(request):
    """Return the accumulated error-report text as JSON, creating an empty
    report file on first access.
    """
    temp_dir = utilities.get_workspace()
    # The report lives above the workspace path; the slice strips a 24-char
    # app-specific suffix — TODO confirm this length is deployment-stable.
    temp_dir = temp_dir[:-24]
    file_path = temp_dir + '/error_report.txt'
    if not os.path.exists(file_path):
        # First access: create an empty report file.
        open(file_path, 'a').close()
        content = ''
    else:
        with open(file_path, 'r') as file_temp:
            content = file_temp.read()
    return JsonResponse({"Error Reports": content})
def home(request):
    """Controller for the data cart home page.

    Lists the zipped files available in the workspace and renders them.
    """
    zip_info = utilities.list_zip_files(request)
    workspace = utilities.get_workspace()
    context = {'file_info': zip_info, 'workspace': workspace}
    return render(request, 'data_cart/home.html', context)
def home(request):
    """Controller for the resource-creator home page.

    HydroClient POSTs a referenced-time-series (REFTS) description here;
    the form body is cached to the workspace so the other controllers
    (create_layer, chart_data) can read it back later.
    """
    base_path = utilities.get_workspace() + "/id/timeseriesLayerResource.json.refts"
    login1 = 'True' if request.user.is_authenticated() else 'False'
    body = request.body
    try:
        decode_body = json.loads(request.body.decode("utf-8"))
    except Exception:
        decode_body = "no data"
    try:
        form_body = request.POST
    except Exception:
        form_body = "no data"
    # Cache the submitted REFTS payload for the other controllers.
    with open(base_path, 'w') as outfile:
        json.dump(form_body, outfile)
    context = {
        'source': body,
        'cuahsi_ids': decode_body,
        'quality': form_body,
        'method': request,
        'sourceid': [],
        'serviceurl': [],
        'login1': login1,
    }
    return render(request, 'hydroshare_resource_creator/home.html', context)
def home(request):
    """Controller for the resource-creator home page (duplicate variant).

    Caches the POSTed REFTS payload to the workspace and renders the page
    with the decoded request body.
    """
    base_path = utilities.get_workspace() + "/id/timeseriesLayerResource.json.refts"
    login1 = 'True' if request.user.is_authenticated() else 'False'
    body = request.body
    try:
        decode_body = json.loads(request.body.decode("utf-8"))
    except Exception:
        decode_body = "no data"
    try:
        form_body = request.POST
    except Exception:
        form_body = "no data"
    # Cache the submitted REFTS payload for the other controllers.
    with open(base_path, 'w') as outfile:
        json.dump(form_body, outfile)
    context = {
        'source': body,
        'cuahsi_ids': decode_body,
        'quality': form_body,
        'method': request,
        'sourceid': [],
        'serviceurl': [],
        'login1': login1,
    }
    return render(request, 'hydroshare_resource_creator/home.html', context)
def delete_file(request, res_id, file_name, src):
    """Delete a file from a HydroShare resource (OAuth helper chosen by
    feature flag) and remove the local working copy.
    """
    try:
        if use_hs_client_helper:
            hs = get_oauth_hs(request)
        else:
            hs = getOAuthHS(request)
        file_path = utilities.get_workspace() + '/id'
        root_dir = file_path + '/' + res_id
        hs.deleteResourceFile(res_id, file_name)
        shutil.rmtree(root_dir)
        result = {"File Deleted": file_name}
    except Exception:
        result = {'File not Deleted': file_name}
    return JsonResponse(result)
def chart_data(request, res_id, src, id_qms):
    """Build chart-ready data from one or more WaterML time series.

    If `src` contains 'xmlrest' the request comes from the USGS Gauge
    Viewer app with a raw WaterML URL in POST 'url_xml'; otherwise
    `res_id` names a (possibly zipped) HydroShare resource whose WaterML
    is unzipped into the workspace and parsed.
    """
    data_for_chart = []
    test = ''
    file_number = 0
    xml_id = None
    xml_rest = False
    if "xmlrest" in src:
        # Direct WaterML URL from the USGS Gauge Viewer app.
        xml_rest = True
        test = request.POST.get('url_xml')
        xml_id = str(uuid.uuid4())  # unique id for this time series
    # Check whether an unzipped xml file is already cached.
    file_path = utilities.waterml_file_path(res_id, xml_rest, xml_id)
    if src == 'hydroshare':
        file_number = utilities.unzip_waterml(request, res_id, src, test, xml_id)
    elif not os.path.exists(file_path):
        # No cached xml: download and unzip it now.
        file_number = utilities.unzip_waterml(request, res_id, src, test, xml_id)
    if file_number > 0:
        temp_dir = utilities.get_workspace()  # hoisted out of the loop
        for i in range(0, file_number):
            series_path = temp_dir + '/id/timeserieslayer' + str(i) + '.xml'
            data_for_chart.append(utilities.Original_Checker(series_path, id_qms))
    elif not os.path.exists(file_path):
        # unzip_waterml failed and there is no cached file either.
        data_for_chart = {'status': 'Resource file not found'}
    else:
        # Parse the cached WaterML into a chart data object.
        data_for_chart.append(utilities.Original_Checker(file_path, id_qms))
    return JsonResponse({'data': data_for_chart})
def chart_data(request, res_id):
    """Load time-series layer data for the chart view.

    res_id == 'None' means the payload was cached locally by home();
    otherwise the HydroShare resource is downloaded and its WaterML and
    .json.refts files are parsed out of the resource tree.
    """
    error = ''
    data1 = None
    if res_id == 'None':
        data = utilities.parse_JSON()
        try:
            data1 = json.loads(data['timeSeriesLayerResource'])
        except Exception:
            data1 = ''
        if data1 == '':
            # Payload may already be a parsed object rather than a JSON string.
            try:
                data1 = data['timeSeriesLayerResource']
            except Exception:
                data1 = ''
        error = "No data in file" if data1 == '' else ''
    else:
        temp_dir = utilities.get_workspace()
        root_dir = temp_dir + '/id/' + res_id
        try:
            shutil.rmtree(root_dir)  # drop any stale local copy
        except OSError:
            pass
        if not os.path.exists(temp_dir + "/id"):
            # NOTE(review): first call only creates the work dir and skips the
            # download — presumably relies on a later retry; confirm intended.
            os.makedirs(temp_dir + "/id")
        else:
            if use_hs_client_helper:
                hs = get_oauth_hs(request)
            else:
                hs = getOAuthHS(request)
            file_path = temp_dir + '/id'
            hs.getResource(res_id, destination=file_path, unzip=True)
            root_dir = file_path + '/' + res_id
            data_dir = root_dir + '/' + res_id + '/data/contents/'
            for subdir, dirs, files in os.walk(data_dir):
                for fname in files:
                    data_file = data_dir + fname
                    if 'wml_1_' in fname:
                        # Copy the WaterML out to a flat, predictable path.
                        with open(data_file, 'r') as f:
                            file_data = f.read()
                        with open(temp_dir + '/id/' + res_id + '.xml', 'wb') as out:
                            out.write(file_data)
                    if '.json.refts' in fname:
                        with open(data_file, 'r') as f:
                            file_data = f.read()
                        data1 = json.loads(file_data.encode(encoding='UTF-8'))
                        data1 = data1['timeSeriesLayerResource']
    return JsonResponse({"data": data1, 'error': error})
def temp_waterml(request, id):
    """Serve a WaterML document from the workspace 'id' directory as XML."""
    waterml_path = utilities.get_workspace() + "/id" + "/" + id
    return HttpResponse(FileWrapper(open(waterml_path)),
                        content_type='application/xml')
def chart_data(request, res_id, src):
    """Download every file of a HydroShare resource and return the raw
    contents keyed by filename, plus whether the current user owns it.

    Returns {'data': ..., 'owner': bool, 'error': bool}; on failure 'data'
    carries the exception text.
    """
    data_for_chart = {}
    error = False
    is_owner = False
    file_path = utilities.get_workspace() + '/id'
    root_dir = file_path + '/' + res_id
    try:
        shutil.rmtree(root_dir)  # discard any stale local copy
    except OSError:
        pass
    try:
        if use_hs_client_helper:
            hs = get_oauth_hs(request)
        else:
            hs = getOAuthHS(request)
        hs.getResource(res_id, destination=file_path, unzip=True)
        data_dir = root_dir + '/' + res_id + '/data/contents/'
        for subdir, dirs, files in os.walk(data_dir):
            for fname in files:
                with open(data_dir + fname, 'r') as f:
                    data = f.read()
                try:
                    data = data.decode('latin-1')
                except Exception:
                    pass  # already decoded text; serve as-is
                data_for_chart[str(fname)] = data
        user = hs.getUserInfo()
        # Ownership check: is res_id among the resources this user owns?
        for res in hs.getResourceList(owner=user['username']):
            if res_id == res["resource_id"]:
                is_owner = True
    except Exception as inst:
        # BUG FIX: original assigned `owner = False`, leaving `is_owner`
        # untouched in the error path.
        is_owner = False
        error = True
        try:
            data_for_chart = str(inst)
        except Exception:
            data_for_chart = "There was an error loading data for resource" + res_id
    return JsonResponse({"data": data_for_chart, "owner": is_owner, "error": error})
def chart_data(request, res_id):
    """Load time-series layer data for the chart view (formatted duplicate).

    res_id == 'None' means the payload was cached locally by home();
    otherwise the HydroShare resource is downloaded and its WaterML and
    .json.refts files are parsed out of the resource tree.
    """
    error = ''
    data1 = None
    if res_id == 'None':
        data = utilities.parse_JSON()
        try:
            data1 = json.loads(data['timeSeriesLayerResource'])
        except Exception:
            data1 = ''
        if data1 == '':
            # Payload may already be a parsed object rather than a JSON string.
            try:
                data1 = data['timeSeriesLayerResource']
            except Exception:
                data1 = ''
        error = "No data in file" if data1 == '' else ''
    else:
        temp_dir = utilities.get_workspace()
        root_dir = temp_dir + '/id/' + res_id
        try:
            shutil.rmtree(root_dir)  # drop any stale local copy
        except OSError:
            pass
        if not os.path.exists(temp_dir + "/id"):
            # NOTE(review): first call only creates the work dir and skips the
            # download — presumably relies on a later retry; confirm intended.
            os.makedirs(temp_dir + "/id")
        else:
            if use_hs_client_helper:
                hs = get_oauth_hs(request)
            else:
                hs = getOAuthHS(request)
            file_path = temp_dir + '/id'
            hs.getResource(res_id, destination=file_path, unzip=True)
            root_dir = file_path + '/' + res_id
            data_dir = root_dir + '/' + res_id + '/data/contents/'
            for subdir, dirs, files in os.walk(data_dir):
                for fname in files:
                    data_file = data_dir + fname
                    if 'wml_1_' in fname:
                        # Copy the WaterML out to a flat, predictable path.
                        with open(data_file, 'r') as f:
                            file_data = f.read()
                        with open(temp_dir + '/id/' + res_id + '.xml', 'wb') as out:
                            out.write(file_data)
                    if '.json.refts' in fname:
                        with open(data_file, 'r') as f:
                            file_data = f.read()
                        data1 = json.loads(file_data.encode(encoding='UTF-8'))
                        data1 = data1['timeSeriesLayerResource']
    return JsonResponse({"data": data1, 'error': error})
def view_counter(request):
    """Return the persisted page-view count as JSON.

    The counter file lives above the workspace path; the slice strips a
    24-char app-specific suffix — TODO confirm this is deployment-stable.
    """
    temp_dir = utilities.get_workspace()
    file_path = temp_dir[:-24] + 'view_counter.txt'
    with open(file_path, 'r') as file_temp:
        content = file_temp.read()
    return JsonResponse({"Number of Viewers": content})
def create_layer(request, fun_type, res_id):
    """Create or update a HydroShare resource from the cached REFTS payload.

    fun_type 'create' makes a new GenericResource from the filtered REFTS
    file; 'update' replaces the .json.refts file on an existing resource.
    Returns a JsonResponse with the resource id or the string 'error'.
    """
    resource_id = None
    selected_series = []
    title = str(request.POST.get('resTitle'))
    abstract = str(request.POST.get('resAbstract'))
    keywords = str(request.POST.get('resKeywords')).split(',')
    checked = trim(request.POST.get('checked_ids'))
    int_resource = [int(res) for res in checked]
    metadata = []
    if use_hs_client_helper:
        hs = get_oauth_hs(request)
    else:
        hs = getOAuthHS(request)
    temp_dir = utilities.get_workspace()
    file_name = title.replace(" ", "")
    file_path = temp_dir + '/id/timeseriesLayerResource.json.refts'
    fpath = temp_dir + '/id/' + file_name + '.json.refts'
    with open(file_path, 'r') as infile:
        file_data = infile.read()
    data = json.loads(file_data.encode(encoding='UTF-8'))
    data = data['timeSeriesLayerResource']
    try:
        data_symbol = data['symbol']
        data_file = data['fileVersion']
    except Exception:
        # Payload was double-encoded; parse the inner JSON string.
        data = json.loads(data)
        data_symbol = data['symbol']
        data_file = data['fileVersion']
    # Keep only the series whose checkbox index was selected in the UI.
    for counter, series in enumerate(data['REFTS']):
        if counter in int_resource:
            selected_series.append(series)
    data.update({
        "REFTS": selected_series,
        "fileVersion": data_file,
        "title": title,
        "symbol": data_symbol,
        "abstract": abstract,
        'keyWords': keywords,
    })
    with open(fpath, 'w') as outfile:
        json.dump({"timeSeriesLayerResource": data}, outfile)
    if fun_type == 'create':
        try:
            resource_id = hs.createResource('GenericResource', title,
                                            resource_file=fpath,
                                            keywords=keywords,
                                            abstract=abstract,
                                            metadata=metadata)
        except Exception:
            resource_id = "error"
    elif fun_type == 'update':
        try:
            try:
                # BUG FIX: original passed fpath + '.json.refts' — a local
                # path with a doubled suffix. deleteResourceFile takes the
                # filename inside the resource.
                resource_id = hs.deleteResourceFile(res_id, file_name + '.json.refts')
            except Exception:
                pass  # file doesn't exist on the resource yet
            resource_id = hs.addResourceFile(res_id, fpath)
        except Exception:
            resource_id = "error"
    return JsonResponse({'Request': resource_id})
def create_layer(request, fun_type, res_id):
    """Create or update a HydroShare resource from the cached REFTS payload
    (duplicate variant).

    fun_type 'create' makes a new GenericResource; 'update' replaces the
    .json.refts file on an existing resource. Returns the resource id or
    the string 'error' in a JsonResponse.
    """
    resource_id = None
    selected_series = []
    title = str(request.POST.get('resTitle'))
    abstract = str(request.POST.get('resAbstract'))
    keywords = str(request.POST.get('resKeywords')).split(',')
    checked = trim(request.POST.get('checked_ids'))
    int_resource = [int(res) for res in checked]
    metadata = []
    if use_hs_client_helper:
        hs = get_oauth_hs(request)
    else:
        hs = getOAuthHS(request)
    temp_dir = utilities.get_workspace()
    file_name = title.replace(" ", "")
    file_path = temp_dir + '/id/timeseriesLayerResource.json.refts'
    fpath = temp_dir + '/id/' + file_name + '.json.refts'
    with open(file_path, 'r') as infile:
        file_data = infile.read()
    data = json.loads(file_data.encode(encoding='UTF-8'))
    data = data['timeSeriesLayerResource']
    try:
        data_symbol = data['symbol']
        data_file = data['fileVersion']
    except Exception:
        # Payload was double-encoded; parse the inner JSON string.
        data = json.loads(data)
        data_symbol = data['symbol']
        data_file = data['fileVersion']
    # Keep only the series whose checkbox index was selected in the UI.
    for counter, series in enumerate(data['REFTS']):
        if counter in int_resource:
            selected_series.append(series)
    data.update({
        "REFTS": selected_series,
        "fileVersion": data_file,
        "title": title,
        "symbol": data_symbol,
        "abstract": abstract,
        'keyWords': keywords,
    })
    with open(fpath, 'w') as outfile:
        json.dump({"timeSeriesLayerResource": data}, outfile)
    if fun_type == 'create':
        try:
            resource_id = hs.createResource('GenericResource', title,
                                            resource_file=fpath,
                                            keywords=keywords,
                                            abstract=abstract,
                                            metadata=metadata)
        except Exception:
            resource_id = "error"
    elif fun_type == 'update':
        try:
            try:
                # BUG FIX: original passed fpath + '.json.refts' — a local
                # path with a doubled suffix. deleteResourceFile takes the
                # filename inside the resource.
                resource_id = hs.deleteResourceFile(res_id, file_name + '.json.refts')
            except Exception:
                pass  # file doesn't exist on the resource yet
            resource_id = hs.addResourceFile(res_id, fpath)
        except Exception:
            resource_id = "error"
    return JsonResponse({'Request': resource_id})
def chart_data(request, res_id, src):
    """Download every file of a HydroShare resource (getOAuthHS variant) and
    return the raw contents keyed by filename, plus an ownership flag.

    Returns {'data': ..., 'owner': bool, 'error': bool}; on failure 'data'
    carries the exception text.
    """
    data_for_chart = {}
    error = False
    is_owner = False
    file_path = utilities.get_workspace() + '/id'
    root_dir = file_path + '/' + res_id
    try:
        shutil.rmtree(root_dir)  # discard any stale local copy
    except OSError:
        pass
    try:
        hs = getOAuthHS(request)
        hs.getResource(res_id, destination=file_path, unzip=True)
        data_dir = root_dir + '/' + res_id + '/data/contents/'
        for subdir, dirs, files in os.walk(data_dir):
            for fname in files:
                with open(data_dir + fname, 'r') as f:
                    data = f.read()
                try:
                    data = data.decode('latin-1')
                except Exception:
                    pass  # already decoded text; serve as-is
                data_for_chart[str(fname)] = data
        user = hs.getUserInfo()
        # Ownership check: is res_id among the resources this user owns?
        for res in hs.getResourceList(owner=user['username']):
            if res_id == res["resource_id"]:
                is_owner = True
    except Exception as inst:
        # BUG FIX: original assigned `owner = False`, leaving `is_owner`
        # untouched in the error path.
        is_owner = False
        error = True
        try:
            data_for_chart = str(inst)
        except Exception:
            data_for_chart = "There was an error loading data for resource" + res_id
    return JsonResponse({"data": data_for_chart, "owner": is_owner, "error": error})
def create_layer(request, src):
    """Create a GenericResource on HydroShare from the cached REFTS file.

    Reads title/abstract/keywords from the POSTed form and uploads the
    workspace's timeseriesLayerResource.json.refts as the resource file.
    (Dead reference code for RefTimeSeriesResource REST/SOAP creation was
    removed; see VCS history.)
    """
    title = str(request.POST.get('resTitle'))
    abstract = str(request.POST.get('resAbstract'))
    keywords = str(request.POST.get('resKeywords')).split(',')
    metadata = []
    hs = getOAuthHS(request)
    temp_dir = utilities.get_workspace()
    fpath = temp_dir + '/id/timeseriesLayerResource.json.refts'
    try:
        resource_id = hs.createResource('GenericResource', title,
                                        resource_file=fpath,
                                        keywords=keywords,
                                        abstract=abstract,
                                        metadata=metadata)
    except Exception:
        resource_id = "error"
    return JsonResponse({'Request': resource_id})
def temp_waterml(request, id):
    """Serve a WaterML file straight from the workspace root as XML."""
    base_path = utilities.get_workspace()
    file_path = base_path + "/" + id
    response = HttpResponse(FileWrapper(open(file_path)),
                            content_type="application/xml")
    return response
def home(request):
    """Controller for the resource-creator home page (non-persisting variant).

    Decodes the POSTed REFTS payload and renders it; unlike the caching
    variant, the payload is NOT written to the workspace here (that code
    is deliberately disabled).
    """
    login1 = 'True' if request.user.is_authenticated() else 'False'
    body = request.body
    try:
        decode_body = json.loads(request.body.decode("utf-8"))
    except Exception:
        decode_body = "no data"
    try:
        form_body = request.POST
    except Exception:
        form_body = "no data"
    context = {
        'source': body,
        'cuahsi_ids': decode_body,
        'quality': form_body,
        'method': request,
        'sourceid': [],
        'serviceurl': [],
        'login1': login1,
    }
    return render(request, 'hydroshare_resource_creator/home.html', context)