Esempio n. 1
0
File: io.py Progetto: cgyurgyik/yt
 def __init__(self, ds):
     """Set up HTTP-stream IO for the dataset *ds*.

     Raises ImportError when the optional ``requests`` package is not
     importable (checked via ``get_requests``).
     """
     requests = get_requests()
     if requests is None:
         raise ImportError("This functionality depends on the requests package")
     self._url = ds.base_url
     # NOTE: this should eventually manage the IO and cache it.
     self.total_bytes = 0
     super(IOHandlerHTTPStream, self).__init__(ds)
Esempio n. 2
0
    def _parse_parameter_file(self):
        """Populate dataset attributes from the remote ``yt_index.json``.

        Fetches ``<base_url>/yt_index.json`` with ``requests``, merges the
        decoded header into ``self.parameters``, and sets domain geometry,
        time, and cosmology attributes from it.

        Raises RuntimeError when the index file cannot be fetched.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.parameters["HydroMethod"] = "sph"

        # Here's where we're going to grab the JSON index file
        requests = get_requests()
        hreq = requests.get(self.base_url + "/yt_index.json")
        if hreq.status_code != 200:
            # Include the URL and status so the failure is diagnosable.
            raise RuntimeError(
                "Failed to fetch %s/yt_index.json (HTTP status %s)"
                % (self.base_url, hreq.status_code))
        header = json.loads(hreq.content)
        # JSON object keys are strings; particle counts are keyed by int.
        header['particle_count'] = dict((int(k), header['particle_count'][k])
                                        for k in header['particle_count'])
        # BUG FIX: merge into the existing dict instead of rebinding it, so
        # the "HydroMethod" entry set above is not silently discarded.
        self.parameters.update(header)

        # Now we get what we need
        self.domain_left_edge = np.array(header['domain_left_edge'], "float64")
        self.domain_right_edge = np.array(header['domain_right_edge'],
                                          "float64")
        nz = 1 << self.over_refine_factor
        self.domain_dimensions = np.ones(3, "int32") * nz
        self.periodicity = (True, True, True)

        self.current_time = header['current_time']
        # Fall back to the wall clock when the header has no identifier.
        self.unique_identifier = header.get("unique_identifier", time.time())
        self.cosmological_simulation = int(header['cosmological_simulation'])
        for attr in ('current_redshift', 'omega_lambda', 'omega_matter',
                     'hubble_constant'):
            setattr(self, attr, float(header[attr]))

        self.file_count = header['num_files']
Esempio n. 3
0
 def _is_valid(self, *args, **kwargs):
     if not args[0].startswith("http://"):
         return False
     requests = get_requests()
     if requests is None:
         return False
     hreq = requests.get(args[0] + "/yt_index.json")
     if hreq.status_code == 200:
         return True
     return False
Esempio n. 4
0
 def _is_valid(cls, filename, *args, **kwargs):
     if not filename.startswith("http://"):
         return False
     requests = get_requests()
     if requests is None:
         return False
     hreq = requests.get(filename + "/yt_index.json")
     if hreq.status_code == 200:
         return True
     return False
Esempio n. 5
0
 def __init__(self, base_url,
              dataset_type="http_particle_stream", unit_system="cgs",
              index_order=None, index_filename=None):
     """Create an HTTP-stream dataset rooted at *base_url*.

     Raises ImportError when the optional ``requests`` package is not
     importable (checked via ``get_requests``).
     """
     requests = get_requests()
     if requests is None:
         raise ImportError(
             "This functionality depends on the requests package")
     self.base_url = base_url
     # The empty string stands in for a filename; data comes over HTTP.
     super(HTTPStreamDataset, self).__init__(
         "", dataset_type=dataset_type, unit_system=unit_system,
         index_order=index_order, index_filename=index_filename)
Esempio n. 6
0
 def _open_stream(self, data_file, field):
     """Fetch and return the raw bytes for *field* from the data server.

     *field* is a ``(ftype, fname)`` tuple; the URL is built from the
     handler's base URL and ``data_file.file_id``. The downloaded size is
     accumulated into ``self.total_bytes``.

     Raises RuntimeError when the server does not answer with HTTP 200.
     """
     # This does not actually stream yet!
     ftype, fname = field
     s = "%s/%s/%s/%s" % (self._url, data_file.file_id, ftype, fname)
     mylog.info("Loading URL %s", s)
     requests = get_requests()
     resp = requests.get(s)
     if resp.status_code != 200:
         # Include the URL and status so the failure is diagnosable.
         raise RuntimeError(
             "Failed to load %s (HTTP status %s)" % (s, resp.status_code))
     self.total_bytes += len(resp.content)
     return resp.content
Esempio n. 7
0
 def __init__(self,
              base_url,
              dataset_type="http_particle_stream",
              n_ref=64,
              over_refine_factor=1,
              unit_system="cgs"):
     """Create an HTTP-stream dataset rooted at *base_url*.

     Raises ImportError when the optional ``requests`` package is not
     importable (checked via ``get_requests``).
     """
     requests = get_requests()
     if requests is None:
         raise ImportError(
             "This functionality depends on the requests package")
     self.base_url = base_url
     self.n_ref = n_ref
     self.over_refine_factor = over_refine_factor
     # The empty string stands in for a filename; data comes over HTTP.
     super(HTTPStreamDataset, self).__init__("",
                                             dataset_type,
                                             unit_system=unit_system)
Esempio n. 8
0
 def _is_valid(cls, *args, **kwargs):
     sdf_header = kwargs.get('sdf_header', args[0])
     if sdf_header.startswith("http"):
         requests = get_requests()
         if requests is None: 
             return False
         hreq = requests.get(sdf_header, stream=True)
         if hreq.status_code != 200: return False
         # Grab a whole 4k page.
         line = next(hreq.iter_content(4096))
     elif os.path.isfile(sdf_header):
         with safeopen(sdf_header, "r", encoding = 'ISO-8859-1') as f:
             line = f.read(10).strip()
     else:
         return False
     return line.startswith("# SDF")
Esempio n. 9
0
    def _parse_parameter_file(self):
        """Populate dataset attributes from the remote ``yt_index.json``.

        Fetches ``<base_url>/yt_index.json`` with ``requests``, merges the
        decoded header into ``self.parameters``, and sets domain geometry,
        time, and cosmology attributes from it.

        Raises RuntimeError when the index file cannot be fetched.
        """
        self.dimensionality = 3
        self.refine_by = 2
        self.parameters["HydroMethod"] = "sph"

        # Here's where we're going to grab the JSON index file
        requests = get_requests()
        hreq = requests.get(self.base_url + "/yt_index.json")
        if hreq.status_code != 200:
            # Include the URL and status so the failure is diagnosable.
            raise RuntimeError(
                "Failed to fetch %s/yt_index.json (HTTP status %s)"
                % (self.base_url, hreq.status_code))
        header = json.loads(hreq.content)
        # JSON object keys are strings; particle counts are keyed by int.
        header["particle_count"] = {
            int(k): header["particle_count"][k]
            for k in header["particle_count"]
        }
        # BUG FIX: merge into the existing dict instead of rebinding it, so
        # the "HydroMethod" entry set above is not silently discarded.
        self.parameters.update(header)

        # Now we get what we need
        self.domain_left_edge = np.array(header["domain_left_edge"], "float64")
        self.domain_right_edge = np.array(header["domain_right_edge"],
                                          "float64")
        self.domain_dimensions = np.ones(3, "int32")
        self._periodicity = (True, True, True)

        self.current_time = header["current_time"]
        # Fall back to the wall clock when the header has no identifier.
        self.unique_identifier = header.get("unique_identifier", time.time())
        self.cosmological_simulation = int(header["cosmological_simulation"])
        for attr in (
                "current_redshift",
                "omega_lambda",
                "omega_matter",
                "hubble_constant",
        ):
            setattr(self, attr, float(header[attr]))

        self.file_count = header["num_files"]