Example #1
 def save_object(self, obj, name):
     """
     Save an object (*obj*) to the data_file using the Pickle protocol,
     under the name *name* on the node /Objects.
     """
     # cPickle comes from yt.extern.six.moves and np is numpy; both are
     # module-level imports in the original source.  protocol=-1 selects
     # the highest pickle protocol available.
     s = cPickle.dumps(obj, protocol=-1)
     # Store the pickle as a one-byte character array under /Objects/<name>.
     self.save_data(np.array(s, dtype='c'), "/Objects", name, force=True)
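A minimal standalone sketch of the same pickle-to-character-array round trip, assuming only numpy and the standard pickle module; the sample dict is made up for illustration, and save_data plus the /Objects node layout are yt-specific and not reproduced here.

import pickle
import numpy as np

# Serialize an object with the highest available protocol, then view the
# pickle as a one-byte-per-element character array, as save_object does
# with np.array(s, dtype='c').
obj = {"level": 3, "fields": ["Density"]}
s = pickle.dumps(obj, protocol=-1)
arr = np.frombuffer(s, dtype="c")

# Recover the original object from the character array.
restored = pickle.loads(arr.tobytes())
assert restored == obj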
Example #4
    def write_hdf5(self, filename, dataset_name=None, info=None):
        r"""Writes the array to an hdf5 file.

        Parameters
        ----------
        filename : string
            The filename to create and write the dataset to.

        dataset_name : string
            The name of the dataset to create in the file.

        info : dictionary
            A dictionary of supplementary info to append as attributes to
            the dataset.

        Examples
        --------
        >>> a = YTArray([1,2,3], 'cm')

        >>> myinfo = {'field':'dinosaurs', 'type':'field_data'}

        >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs',
        ...              info=myinfo)

        """
        import h5py
        from yt.extern.six.moves import cPickle as pickle
        if info is None:
            info = {}

        info['units'] = str(self.units)
        info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut))

        if dataset_name is None:
            dataset_name = 'array_data'

        # Open in append mode: create the file if needed, otherwise keep
        # existing datasets intact (recent h5py requires an explicit mode).
        f = h5py.File(filename, 'a')
        if dataset_name in f:
            d = f[dataset_name]
            # Overwrite without deleting if we can get away with it.
            if d.shape == self.shape and d.dtype == self.dtype:
                d[:] = self
                # Materialize the key list first; deleting attributes while
                # iterating over d.attrs directly is unsafe.
                for k in list(d.attrs.keys()):
                    del d.attrs[k]
            else:
                del f[dataset_name]
                d = f.create_dataset(dataset_name, data=self)
        else:
            d = f.create_dataset(dataset_name, data=self)

        for k, v in info.items():
            d.attrs[k] = v
        f.close()
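Reading the result back needs nothing yt-specific; a short sketch with plain h5py, assuming the filename and dataset name from the docstring example above.

import h5py

with h5py.File('test_array_data.h5', 'r') as f:
    d = f['dinosaurs']
    values = d[:]                  # the raw array values
    units = d.attrs['units']       # unit string stored by write_hdf5
    # Everything passed via *info* landed here as attributes too;
    # 'unit_registry' is skipped because it is a pickled binary blob.
    extra = {k: d.attrs[k] for k in d.attrs
             if k not in ('units', 'unit_registry')}
print(values, units, extra)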
Example #5
 def dump(self, result_storage):
     if self.answer_name is None:
         return
     # This is where we dump our result storage up to Rackspace Cloud
     # Files (via pyrax), if we are able to.  os, cPickle, and get_pbar
     # come from module-level imports in the original source.
     import pyrax
     pyrax.set_credential_file(os.path.expanduser("~/.yt/rackspace"))
     cf = pyrax.cloudfiles
     c = cf.get_container("yt-answer-tests")
     pb = get_pbar("Storing results ", len(result_storage))
     for i, ds_name in enumerate(result_storage):
         pb.update(i)
         # Pickle one dataset's results into a blob for upload.
         rs = cPickle.dumps(result_storage[ds_name])
         object_name = "%s_%s" % (self.answer_name, ds_name)
         # Delete any stale copy of the same object before storing anew.
         if object_name in c.get_object_names():
             obj = c.get_object(object_name)
             c.delete_object(obj)
         c.store_object(object_name, rs)
     pb.finish()
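What dump() uploads for each dataset is just a pickle of that dataset's entry in result_storage. Below is a local round trip of the payload; the answer and dataset names and the dict contents are hypothetical, and the pyrax upload and download calls are omitted.

import pickle

answer_name, ds_name = "gold001", "IsolatedGalaxy"
result_storage = {ds_name: {"field_values": [1.0, 2.0, 3.0]}}

object_name = "%s_%s" % (answer_name, ds_name)  # same naming scheme as dump()
blob = pickle.dumps(result_storage[ds_name])    # what store_object receives

# The retrieval side simply reverses the serialization.
restored = pickle.loads(blob)
assert restored == result_storage[ds_name]
print(object_name)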