def start_file_download(request):
    """Fetch a RAPID prediction netCDF file, open it, and prepare lookups.

    GET parameters:
        res_id -- iRODS file URL, HydroShare resource id, or anything else
                  (falls back to the bundled test dataset).
        src    -- 'iRODS', 'hs', or other (selects the download path).

    Side effects: populates the module-level globals below so subsequent
    requests can reuse the opened datasets.

    Returns a JsonResponse with either a 'success' or an 'error' key.
    """
    # NOTE(review): module-level globals make this view stateful and unsafe
    # under concurrent requests — consider per-session storage instead.
    global temp_dir, prediction_data, rp_data, total_prediction_comids, \
        total_rp_comids, sorted_prediction_comids, sorted_rp_comids, time
    if request.method == 'GET':
        get_data = request.GET
        this_script_path = inspect.getfile(inspect.currentframe())
        try:
            if get_data['res_id'] is not None:
                # NOTE(review): this directory is never cleaned up — one
                # temp dir leaks per request.
                temp_dir = tempfile.mkdtemp()
                file_path = get_data['res_id']
                if get_data['src'] == 'iRODS':
                    # res_id is a direct URL; stream it down in 1 KiB chunks.
                    download = requests.get(file_path, stream=True)
                    filename = os.path.basename(file_path)
                    local_file_path = os.path.join(temp_dir, filename)
                    with open(local_file_path, 'wb') as fd:
                        for chunk in download.iter_content(1024):
                            fd.write(chunk)
                    prediction_data = nc.Dataset(local_file_path, mode="r")
                elif get_data['src'] == 'hs':
                    # SECURITY(review): credentials are hard-coded here;
                    # move them to settings/environment configuration.
                    auth = HydroShareAuthBasic(username='******',
                                               password='******')
                    hs = HydroShare(auth=auth,
                                    hostname="playground.hydroshare.org",
                                    use_https=False)
                    resource_data = hs.getSystemMetadata(file_path)
                    filename = resource_data['resource_title']
                    # This only works if the resource contains exactly one
                    # file and the resource title equals the filename.
                    hs.getResourceFile(file_path, filename,
                                       destination=temp_dir)
                    # Use os.path.join for consistency with the iRODS branch
                    # (the original concatenated with a literal "/").
                    local_file_path = os.path.join(temp_dir, filename)
                    prediction_data = nc.Dataset(local_file_path, mode="r")
                else:
                    # Fall back to the bundled sample dataset next to this
                    # controllers module.
                    testfile_path = this_script_path.replace(
                        'controllers.py', 'public/data/test.nc')
                    prediction_data = nc.Dataset(testfile_path, mode="r")

                # Validate the expected (COMID, time) layout and sort the
                # RAPID netCDF file by COMID.
                qout_dimensions = prediction_data.variables['Qout'].dimensions
                if qout_dimensions[0].lower() == 'comid' and \
                        qout_dimensions[1].lower() == 'time':
                    sorted_prediction_comids = sorted(
                        enumerate(prediction_data.variables['COMID'][:]),
                        key=lambda comid: comid[1])
                    total_prediction_comids = len(sorted_prediction_comids)
                else:
                    return JsonResponse({'error': "Invalid netCDF file"})

                variables = prediction_data.variables.keys()
                if 'time' in variables:
                    # Convert seconds to milliseconds for the client-side plot.
                    time = [t * 1000
                            for t in prediction_data.variables['time'][:]]
                else:
                    return JsonResponse({'error': "Invalid netCDF file"})

                # Load the companion return-period dataset shipped with the app.
                rp_data_path = this_script_path.replace(
                    'controllers.py', 'public/data/return_period_data.nc')
                rp_data = nc.Dataset(rp_data_path, mode="r")
                sorted_rp_comids = sorted(
                    enumerate(rp_data.variables['COMID'][:]),
                    key=lambda comid: comid[1])
                total_rp_comids = len(sorted_rp_comids)
                return JsonResponse({'success': "The file is ready to go."})
        except Exception as err:
            # BUG FIX: exception objects are not JSON serializable — send the
            # message text. (Also replaced the Python-2-only
            # `except Exception, err` syntax.)
            return JsonResponse({'error': str(err)})
def download_epanet_model(request):
    """AJAX GET endpoint: fetch an EPANET model file from HydroShare.

    Expects ?model_id=<resource id>. Returns JSON with keys:
    success (bool), message, results (file contents), name (file name).
    """
    return_obj = {'success': False, 'message': None, 'results': "", 'name': ""}
    if request.is_ajax() and request.method == 'GET':
        if not request.GET.get('model_id'):
            return_obj['message'] = message_template_param_unfilled.format(
                param='model_id')
        else:
            model_id = request.GET['model_id']
            try:
                # Prefer the OAuth-authenticated client for this session.
                hs = get_oauth_hs(request)
            except Exception:
                # Fall back to an anonymous client (public resources only).
                # (Narrowed the original bare `except:`.)
                hs = HydroShare()
            # NOTE(review): if the resource holds several files, each loop
            # iteration overwrites the previous — the last file wins.
            for model_file in hs.getResourceFileList(model_id):
                model_url = model_file['url']
                # The file name is everything after 'contents/' in the URL.
                model_name = model_url[model_url.find('contents/') + 9:]
                # Join chunks instead of quadratic `str +=` accumulation.
                model = "".join(
                    line.decode("utf-8")
                    for line in hs.getResourceFile(model_id, model_name))
                return_obj['name'] = model_name
                return_obj['results'] = model
                return_obj['success'] = True
    else:
        # BUG FIX: this view requires GET, but the message claimed POST.
        return_obj['message'] = message_template_wrong_req_method.format(
            method="GET")
    return JsonResponse(return_obj)
def test_create_get_delete_resource_file(self):
    """Round-trip a resource file: add it, fetch it back, verify, delete."""
    client = HydroShare(prompt_auth=False)
    resource_id = '511debf8858a4ea081f78d66870da76c'
    source_path = 'mocks/data/another_resource_file.txt'
    source_name = os.path.basename(source_path)

    # Add: the API echoes the resource id on success.
    self.assertEqual(client.addResourceFile(resource_id, source_path),
                     resource_id)

    # Get: download into a scratch dir and compare byte-for-byte.
    scratch = tempfile.mkdtemp()
    fetched = client.getResourceFile(resource_id, source_name,
                                     destination=scratch)
    self.assertTrue(filecmp.cmp(fetched, source_path, shallow=False))
    shutil.rmtree(scratch)

    # Delete: again echoes the resource id.
    self.assertEqual(client.deleteResourceFile(resource_id, source_name),
                     resource_id)
def test_create_get_delete_resource_file(self):
    """Create, retrieve, verify, and remove a single resource file."""
    api = HydroShare()
    rid = '511debf8858a4ea081f78d66870da76c'
    local_path = 'mocks/data/another_resource_file.txt'
    local_name = os.path.basename(local_path)

    # Create — a successful add returns the resource id.
    added = api.addResourceFile(rid, local_path)
    self.assertEqual(added, rid)

    # Retrieve — download to a temp dir and check contents match exactly.
    workdir = tempfile.mkdtemp()
    downloaded = api.getResourceFile(rid, local_name, destination=workdir)
    self.assertTrue(filecmp.cmp(downloaded, local_path, shallow=False))
    shutil.rmtree(workdir)

    # Remove — a successful delete also returns the resource id.
    removed = api.deleteResourceFile(rid, local_name)
    self.assertEqual(removed, rid)
def save_hs_to_favorites(resourceid, displayname, modeltype):
    """Save a HydroShare resource as a favorite model.

    Downloads every file of the resource into the app workspace, creates a
    Model row, then stores each file's text in the table matching its
    extension (e.g. '.nam' -> nam) and links it back onto the Model row via
    a '<ext>id' attribute. Downloaded files are removed afterwards.

    Args:
        resourceid:  HydroShare resource id to download.
        displayname: Display name stored on (and used to re-query) the Model.
        modeltype:   Model type label stored on the Model row.
    """
    # Map MODFLOW/MT3D file extensions to their storage table classes.
    # NOTE(review): the 'str' entry references a name that shadows the
    # builtin `str` — presumably a project table class; verify.
    dbs = {
        'zone': zone, 'mult': mult, 'pval': pval, 'bas6': bas6, 'dis': dis,
        'disu': disu, 'bcf6': bcf6, 'lpf': lpf, 'hfb6': hfb6, 'chd': chd,
        'fhb': fhb, 'wel': wel, 'mnw1': mnw1, 'mnw2': mnw2, 'mnwi': mnwi,
        'drn': drn, 'rch': rch, 'evt': evt, 'ghb': ghb, 'gmg': gmg,
        'lmt6': lmt6, 'lmt7': lmt7, 'riv': riv, 'str': str, 'swi2': swi2,
        'pcg': pcg, 'pcgn': pcgn, 'nwt': nwt, 'pks': pks, 'sms': sms,
        'sfr': sfr, 'lak': lak, 'gage': gage, 'sip': sip, 'sor': sor,
        'de4': de4, 'oc': oc, 'uzf': uzf, 'upw': upw, 'sub': sub,
        'swt': swt, 'hyd': hyd, 'hob': hob, 'vdf': vdf, 'vsc': vsc,
        'drt': drt, 'pvl': pvl, 'ets': ets, 'bas': bas, 'nam': nam,
    }
    Session = app.get_persistent_store_database('primary_db',
                                                as_sessionmaker=True)
    session = Session()
    hs = HydroShare()
    app_dir = app.get_app_workspace().path

    # Download every file of the resource into the workspace.
    filelist = []
    for resource in hs.getResourceFileList(resourceid):
        # The file name is the last path segment of the file URL.
        fname = resource['url'].split("/")[-1]
        hs.getResourceFile(resourceid, fname, destination=app_dir)
        filelist.append(fname)
    # (Removed two discarded `json.dumps(...)` calls — their results were
    # never used.)

    try:
        fav = Model(resourceid=resourceid, displayname=displayname,
                    modeltype=modeltype, modelfiles=filelist)
        session.add(fav)
        # The session's autoflush makes `fav` visible to this query even
        # though it has not been committed yet.
        model = session.query(Model).filter(
            Model.displayname == displayname).first()
        mainid = model.id  # retained from the original; currently unused
        for fi in filelist:
            # NOTE(review): takes the token after the FIRST dot, so
            # 'model.v2.nam' yields 'v2' — os.path.splitext may be intended.
            ext = fi.split(".")[1]
            filepath = os.path.join(app_dir, fi)
            with open(filepath, 'r') as myfile:
                data = myfile.read()
            # Store the file contents in the extension-specific table and
            # link its id back onto the Model row.
            tbl = dbs[ext](data=data)
            session.add(tbl)
            session.commit()
            setattr(model, ext + 'id', tbl.id)
            session.commit()
            os.remove(filepath)
    finally:
        # BUG FIX: close the session even when an error occurs mid-loop
        # (the original leaked the session on any exception).
        session.close()
    return
# Get the authentication details usr = hydroshare_login['name'] pwd = hydroshare_login['pass'] # --- Download the data # Authenticate the user auth = HydroShareAuthBasic(username=usr, password=pwd) # Make a hydroshare object - note: needs authentication hs = HydroShare(auth=auth) # Specify the resource ID and download the resource data #out = hs.getResource(download_ID, destination=soil_path) out = hs.getResourceFile(download_ID, "usda_mode_soilclass_250m_ll.tif", destination=soil_path) # Check for output messages print(out) # --- Code provenance # Generates a basic log file in the domain folder and copies the control file and itself there. # Set the log path and file name logPath = soil_path log_suffix = '_soilclass_download_log.txt' # Create a log folder logFolder = '_workflow_log' Path(logPath / logFolder).mkdir(parents=True, exist_ok=True)
print "Bad Zip file"+ id # this block of code will add a time series to the legend and graph the result if (request.POST and "add_ts" in request.POST): if not outside_input: Current_r = request.POST['select_r_script'] if request.POST.get('hydroshare_resource') != None and request.POST.get('hydroshare_file')!= None: try: #adding a hydroshare resource hs = HydroShare() hs_resource = request.POST['hydroshare_resource'] hs_file = request.POST['hydroshare_file'] #hs_text =hs.getResourceFile("b29ac5ce06914752aaac74685ba0f682","DemoReferencedTimeSeries-wml_1.xml") hs_text =hs.getResourceFile(hs_resource,hs_file) # hs_lst =[] This was an old methond to extract the data from the resource. Probably obsolete # for line in hs_text: # hs_lst.append(line) # xml = ''.join(hs_lst) url_hs_resource = "https://www.hydroshare.org/resource/"+hs_resource+"/data/contents/"+hs_file #graph_original = Original_Checker(xml) session = SessionMaker() url1 = URL(url = url_hs_resource) session.add(url1) session.commit() session.close() except etree.XMLSyntaxError as e: #checks to see if data is an xml print "Error:Not XML" #quit("not valid xml") except ValueError, e: #checks to see if Url is valid