Example #1
	def getData(self):
		# Requires: import uuid. WCSRawHelper is defined in the surrounding module.
		wcs_extractor = WCSRawHelper(self.wcs_url, self.extract_dates, self.extract_variable, self.extract_area, self.extract_depth)
		data = wcs_extractor.getData()
		# Write the NetCDF response to a uniquely named file in the output directory.
		fname = self.outdir + str(uuid.uuid4()) + ".nc"
		with open(fname, 'wb') as outfile:
			outfile.write(data.read())
		return fname
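
Reading the whole response into memory with data.read() is fine for small extractions, but the chunked copy used inline by Example #3 scales better. A minimal stand-alone sketch of that pattern (save_stream is a hypothetical helper; the io.BytesIO stands in for the HTTP response object):

import io
import shutil

def save_stream(response, fname, chunk_size=16 * 1024):
    # Copy a file-like response to disk in fixed-size chunks so the whole
    # payload never has to fit in memory at once.
    with open(fname, 'wb') as outfile:
        shutil.copyfileobj(response, outfile, chunk_size)

# Stand-in for the object returned by wcs_extractor.getData().
fake_response = io.BytesIO(b"CDF\x01" + b"\x00" * 1024)
save_stream(fake_response, "example.nc")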
Example #2
   def getCoverageDescriptionData(self):
      # Requires: import xml.etree.ElementTree as ET. WCSRawHelper is defined
      # in the surrounding module.
      coverage_description = {}
      ns = {'xmlns': 'http://www.opengis.net/wcs', 'gml': 'http://www.opengis.net/gml', 'xlink': 'http://www.w3.org/1999/xlink'}
      wcs_extractor = WCSRawHelper(self.wcs_url, self.extract_dates, self.extract_variable, self.extract_area, self.extract_depth)
      coverage_description_xml = ET.fromstring(wcs_extractor.describeCoverage())

      # Read the axis names (e.g. x, y) from the rectified grid definition.
      rectified_grid = coverage_description_xml.find('./xmlns:CoverageOffering/xmlns:domainSet/xmlns:spatialDomain/gml:RectifiedGrid', ns)
      axis_names = [axis_name.text for axis_name in rectified_grid.findall('./gml:axisName', ns)]

      # Each offset vector carries one non-zero component: the grid spacing
      # along the corresponding axis. Pair vectors with axis names by position
      # and stop at the first non-zero component, so a vector with several
      # non-zero entries (or none) cannot knock the indices out of alignment.
      coverage_description['offset_vectors'] = {}
      for i, offset_vector in enumerate(rectified_grid.findall('./gml:offsetVector', ns)):
         for item in offset_vector.text.split(' '):
            if float(item) != 0:
               coverage_description['offset_vectors'][axis_names[i]] = float(item)
               break

      # Collect every available time slice from the temporal domain.
      coverage_description['time_slices'] = []
      for time_slice in coverage_description_xml.findall('./xmlns:CoverageOffering/xmlns:domainSet/xmlns:temporalDomain/gml:timePosition', ns):
         coverage_description['time_slices'].append(time_slice.text)

      return coverage_description
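
The XPath expressions and namespace map above are easiest to follow against a concrete document. A stand-alone sketch with a trimmed-down, fabricated DescribeCoverage fragment (real responses carry far more detail):

import xml.etree.ElementTree as ET

DESCRIBE_COVERAGE = """
<CoverageDescription xmlns="http://www.opengis.net/wcs"
                     xmlns:gml="http://www.opengis.net/gml">
  <CoverageOffering>
    <domainSet>
      <spatialDomain>
        <gml:RectifiedGrid>
          <gml:axisName>x</gml:axisName>
          <gml:axisName>y</gml:axisName>
          <gml:offsetVector>0.05 0</gml:offsetVector>
          <gml:offsetVector>0 -0.05</gml:offsetVector>
        </gml:RectifiedGrid>
      </spatialDomain>
    </domainSet>
  </CoverageOffering>
</CoverageDescription>
"""

# Any prefix works as a key for the default namespace, as long as the same
# prefix is used in the XPath expressions.
ns = {'xmlns': 'http://www.opengis.net/wcs', 'gml': 'http://www.opengis.net/gml'}
root = ET.fromstring(DESCRIBE_COVERAGE)
grid = root.find('./xmlns:CoverageOffering/xmlns:domainSet/xmlns:spatialDomain/gml:RectifiedGrid', ns)
print([a.text for a in grid.findall('./gml:axisName', ns)])  # ['x', 'y']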
Example #3
   def getFiles(self, slices_in_range, max_slices):
      # Requires: import hashlib, math, os, uuid, and a NetCDF binding
      # (e.g. netCDF4 as netCDF). `plotting` and `debug` come from the
      # surrounding module.
      files = []
      total_requests = int(math.ceil(len(slices_in_range) / float(max_slices)))
      next_start = 0
      for i in range(total_requests):
         # Work out the date range covered by this batch of time slices.
         start = slices_in_range[next_start].strftime('%Y-%m-%d %H:%M:%S')
         if next_start + max_slices <= len(slices_in_range):
            end_index = next_start + max_slices - 1
         else:
            end_index = len(slices_in_range) - 1
         end = slices_in_range[end_index].strftime('%Y-%m-%d %H:%M:%S')
         next_start = next_start + max_slices
         extract_dates = start + '/' + end

         wcs_extractor = WCSRawHelper(self.wcs_url, extract_dates, self.extract_variable, self.extract_area, self.extract_depth)

         # Derive the file name from the request URL (encoded, as hashlib
         # requires bytes) so identical requests map to the same cached file.
         fname = self.outdir + hashlib.md5(wcs_extractor.generateGetCoverageUrl().encode('utf-8')).hexdigest() + ".nc"

         if not os.path.isfile(fname):
            # The same request hasn't been downloaded before.
            download_complete = False
            while not download_complete:
               if plotting:
                  debug(3, "Making request {} of {}".format(i + 1, total_requests))
               data = wcs_extractor.getData()

               # Generate a temporary file name to download to.
               fname_temp = self.outdir + str(uuid.uuid4()) + ".nc"

               if plotting:
                  debug(3, "Starting download {} of {}".format(i + 1, total_requests))
               # Download in 16 KiB chunks: a reasonable trade-off between
               # speed and memory use.
               chunk_size = 16 * 1024
               with open(fname_temp, 'wb') as outfile:
                  while True:
                     chunk = data.read(chunk_size)
                     if not chunk:
                        break
                     outfile.write(chunk)

               # Opening the file with the NetCDF library validates the
               # download; a truncated or corrupt file raises RuntimeError.
               try:
                  netCDF.Dataset(fname_temp).close()
                  download_complete = True
               except RuntimeError:
                  os.remove(fname_temp)
                  if plotting:
                     debug(3, "Download is corrupt. Retrying...")
            # Move the validated download to its final, cache-keyed name.
            os.rename(fname_temp, fname)

         if plotting:
            self.update_status(i + 1, total_requests)
         files.append(fname)
      return files
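
The index arithmetic that splits the time slices into batches is the fiddly part of this method. A stand-alone sketch that mirrors it (batch_ranges is a hypothetical helper, not part of the original class):

import math
from datetime import datetime, timedelta

def batch_ranges(slices_in_range, max_slices):
    # Reproduce getFiles' batching: split the time slices into groups of at
    # most max_slices and yield each group's start/end timestamps.
    total_requests = int(math.ceil(len(slices_in_range) / float(max_slices)))
    for i in range(total_requests):
        first = i * max_slices
        last = min(first + max_slices, len(slices_in_range)) - 1
        yield (slices_in_range[first].strftime('%Y-%m-%d %H:%M:%S'),
               slices_in_range[last].strftime('%Y-%m-%d %H:%M:%S'))

slices = [datetime(2024, 1, 1) + timedelta(days=d) for d in range(7)]
for start, end in batch_ranges(slices, 3):
    print(start + '/' + end)
# Three requests: days 0-2, days 3-5, and day 6 on its own.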
Example #4
	def getData(self):
		# Requires: import json, os, time, uuid.
		start_time = time.time()
		wcs_extractor = WCSRawHelper(self.wcs_url, self.extract_dates, self.extract_variable, self.extract_area, self.extract_depth, single=True)
		data = wcs_extractor.getData()
		fname = self.outdir + str(uuid.uuid4()) + ".nc"
		with open(fname, 'wb') as outfile:
			outfile.write(data.read())
		stop_time = time.time()
		# Report how long the extraction took and how large the result is.
		ret = {}
		ret['time_diff'] = stop_time - start_time
		ret['file_size'] = os.stat(fname).st_size
		return json.dumps(ret)
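
Since this variant returns a JSON string rather than a file name, a caller decodes it before use. A small sketch with made-up numbers:

import json

result = json.loads('{"time_diff": 1.73, "file_size": 204800}')
print("{:.2f}s for {} bytes".format(result['time_diff'], result['file_size']))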
Example #5
	def getData(self, dest=None):
		# Requires: import uuid. create_mask comes from the surrounding module.
		wcs_extractor = WCSRawHelper(self.wcs_url, self.extract_dates, self.extract_variable, self.extract_area, self.extract_depth)
		data = wcs_extractor.getData()
		uuid_filename = str(uuid.uuid4()) + ".nc"
		# `dest` and `self.outdir` are expected to end with a path separator.
		if dest:
			fname = dest + uuid_filename
		else:
			fname = self.outdir + uuid_filename
		with open(fname, 'wb') as outfile:
			outfile.write(data.read())
		# Mask the downloaded file with the requested polygon; the arrays
		# returned here are not used further by this method.
		mask, data, _, _ = create_mask(self.masking_polygon, fname, self.extract_variable)
		if dest:
			return uuid_filename
		return fname
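
Note that the concatenation above only yields a valid path when dest and self.outdir end in a path separator. An alternative sketch using os.path.join, which tolerates either form (output_path is a hypothetical helper, not part of the original class):

import os
import uuid

def output_path(outdir, dest=None):
    # os.path.join inserts the separator itself when the directory
    # doesn't already end with one.
    return os.path.join(dest or outdir, str(uuid.uuid4()) + ".nc")

print(output_path("/tmp/extracts"))                # /tmp/extracts/<uuid>.nc
print(output_path("/tmp/extracts", "/tmp/cache"))  # /tmp/cache/<uuid>.nc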
Example #6
	def getData(self):
		# Requires: import uuid. Downloads the same date range and area from
		# two coverages (one variable each) and returns both file names.
		wcs_extractor = WCSRawHelper(self.wcs_url, self.extract_dates, self.extract_variable, self.extract_area)
		data = wcs_extractor.getData()
		fname_one = self.outdir + str(uuid.uuid4()) + ".nc"
		with open(fname_one, 'wb') as outfile:
			outfile.write(data.read())
		wcs_extractor = WCSRawHelper(self.wcs_url_2, self.extract_dates, self.second_var, self.extract_area)
		data = wcs_extractor.getData()
		fname_two = self.outdir + str(uuid.uuid4()) + ".nc"
		with open(fname_two, 'wb') as outfile:
			outfile.write(data.read())
		# Map each variable name to the file that holds its data.
		return {self.extract_variable: fname_one, self.second_var: fname_two}
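
The return value pairs each variable with its downloaded file. A sketch of the shape a caller sees (variable names and paths are made up for illustration):

files = {'chlor_a': '/tmp/1f0c7a.nc', 'sst': '/tmp/9ab2ee.nc'}
for variable, path in files.items():
    print(variable, '->', path)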