Example #1
    def splitAndValidateDatadir(self):
        """
        Ensures that files will be output to a location that Firefly 
        can read, as well as splits the path so that filenames.json 
        references files correctly.
        """
        path_prefix, path = os.path.split(self.JSONdir)
        if path_prefix == '':
            path_prefix = os.getcwd()

        for validate in ['index.html', 'static', 'LICENSE', 'README.md']:
            try:
                assert validate in os.listdir(
                    os.path.join(os.path.split(path_prefix)[0], ".."))
            except AssertionError:
                warnings.warn(
                    FireflyWarning(
                        "JSONdir: {} -- ".format(self.JSONdir) +
                        "is not a sub-directory of Firefly/static/data. " +
                        "\nThis may produce confusing or inoperable results. " +
                        "As such, we will create a symlink for you. " +
                        "You're welcome."))

        return path_prefix, path
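Usage sketch for the path handling above: `os.path.split` peels the final directory
name off of `JSONdir`, and an empty prefix falls back to the current working
directory (the directory name below is hypothetical):

    import os

    JSONdir = "Firefly/data/MyData"  ## hypothetical output directory
    path_prefix, path = os.path.split(JSONdir)
    print(path_prefix, path)  ## -> Firefly/data MyData

    ## a bare directory name has an empty prefix and so resolves
    ##  relative to wherever python is running
    path_prefix, path = os.path.split("MyData")
    if path_prefix == '':
        path_prefix = os.getcwd()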
Example #2
    def outputToJSON(self,
                     JSONdir,
                     filename=None,
                     prefix='',
                     loud=1,
                     write_jsons_to_disk=True):
        """
        Saves the current options to a JSON file.
        Input:
            JSONdir - path for this file to get saved to
            prefix='' - string to prepend to self.options_filename
            loud=1 - flag for whether warnings should be shown
        """
        filename = self.options_filename if filename is None else filename
        all_options_dict = self.outputToDict()

        filename = os.path.join(JSONdir, prefix + filename)

        if loud:
            warnings.warn(
                FireflyWarning(
                    "You will need to add this options filename to" +
                    " filenames.json if this was not called by a Reader instance."
                ))

        ## convert dictionary to a JSON
        return filename, write_to_json(
            all_options_dict,
            filename if write_jsons_to_disk else None)  ## None -> string
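A usage sketch for the method above, assuming an `Options` instance with its
default `options_filename` and an existing `Firefly/data/MyData` directory
(both names here are hypothetical):

    from firefly_api.options import Options

    options = Options()

    ## write the options JSON into Firefly/data/MyData
    filename, _ = options.outputToJSON("Firefly/data/MyData", prefix='my', loud=0)

    ## or keep everything in memory: write_jsons_to_disk=False returns
    ##  the JSON as a string instead of writing a file
    filename, json_string = options.outputToJSON(
        "Firefly/data/MyData", loud=0, write_jsons_to_disk=False)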
Example #3
    def __init__(
        self,
        UIname,
        coordinates,
        tracked_arrays=None,
        tracked_names=None,
        tracked_filter_flags=None,
        tracked_colormap_flags=None,
        decimation_factor=1,
        filenames_and_nparts=None,
        linked_options=None,
        doSPHrad=False,
        **option_kwargs):
        """
        `UIname` - Name of the particle group that shows up in the UI, 4-5 characters is best

        `coordinates` - The coordinates of the points in 3d space, should have a shape of `(nparts,3)`.

        `tracked_arrays=None` - The arrays to associate with each coordinate in space; each array
            should be one-dimensional and have `nparts` entries.

        `tracked_names=None` - Should be the same length as `tracked_arrays`, and gives a
            name to each of the arrays when they show up in the UI dropdowns.

        `tracked_filter_flags=None` - Should be the same length as `tracked_arrays`,
            and gives a flag for whether that array should be available as an interactive filter within Firefly.

        `tracked_colormap_flags=None` - Should be the same length as `tracked_arrays`,
            and gives a flag for whether that array should be available to color points within Firefly.

        `decimation_factor=1` - An integer factor to sub-sample the provided dataset at
            (in addition to any manual subsampling you might do). This will choose
            `nparts/decimation_factor` many points at random from the dataset to display in Firefly. 

        `filenames_and_nparts=None` - Allows you to manually control how the particles
            are distributed among the JSON files, **highly recommended that
            you leave this to** `None`, but if for whatever reason you need fine-tuning
            you should pass a list of tuples in the form 
            `[("json_name0.json",nparts_this_file0),("json_name1.json",nparts_this_file1) ... ]`
            where the sum of `nparts_this_file%d` is exactly `nparts`. These files
            will automatically be added to `filenames.json` if you use `reader.dumpToJSON`.

        `doSPHrad=False` - flag to vary the opacity across a particle by the SPH cubic spline. Should
            also provide SmoothingLength as a tracked_array. 

        `**option_kwargs` - allows you to set default options like the color, particle sizes,
            etc... for this particle group at the creation of the instance. You can see available
            options by looking at `list(particleGroup.options_default.keys())`.
        """

        ## handle default values for iterables
        tracked_arrays = [] if tracked_arrays is None else tracked_arrays
        tracked_names = [] if tracked_names is None else tracked_names
        tracked_filter_flags = [] if tracked_filter_flags is None else tracked_filter_flags
        tracked_colormap_flags = [] if tracked_colormap_flags is None else tracked_colormap_flags

        ## assert statements and user-friendly error messages
        try:
            assert len(tracked_names) == len(tracked_arrays)
        except AssertionError:
            raise ValueError("Make sure each tracked_array has a tracked_name")

        try:
            assert len(tracked_names) == len(tracked_filter_flags)
        except AssertionError:
            warnings.warn(FireflyWarning(
                "Make sure each tracked_array has a tracked_filter_flag, assuming True."))
            ## pad the flags with True for any arrays that are missing one
            tracked_filter_flags = np.append(
                tracked_filter_flags,
                [True]*(len(tracked_names)-len(tracked_filter_flags)),
                axis=0)

        try:
            assert len(tracked_names) == len(tracked_colormap_flags)
        except AssertionError:
            warnings.warn(FireflyWarning(
                "Make sure each tracked_array has a tracked_colormap_flag, assuming True."))
            ## pad the flags with True for any arrays that are missing one
            tracked_colormap_flags = np.append(
                tracked_colormap_flags,
                [True]*(len(tracked_names)-len(tracked_colormap_flags)),
                axis=0)

        
        if filenames_and_nparts is not None:
            try:
                assert type(filenames_and_nparts[0]) == tuple
                assert type(filenames_and_nparts[0][0]) == str
                assert type(filenames_and_nparts[0][1]) == int
            except AssertionError:
                raise ValueError(
                    "filenames_and_nparts should be a list of (string, int) tuples")
        
        self.decimation_factor = decimation_factor
        ## what do we want this to be called in the UI? 
        self.UIname = UIname

        ## the most important thing, where do you want these particles
        ##  to live?
        self.coordinates = np.array(coordinates)

        ## clamp the decimation factor so at least one particle survives
        if self.decimation_factor > self.coordinates.shape[0]:
            self.decimation_factor = max(1,self.coordinates.shape[0]-1)

        ## initialize this badboy
        self.nparts = len(coordinates)

        ## these are the values we're associating with each particle
        ##  make sure each one has a name
        for name,array in zip(tracked_names,tracked_arrays):
            try:
                assert len(array) == self.nparts
            except AssertionError:
                raise ValueError("You passed me %s, which is not the right shape!"%name)

        self.tracked_names = tracked_names
        self.tracked_arrays = tracked_arrays
        self.tracked_filter_flags = np.array(tracked_filter_flags)
        self.tracked_colormap_flags = np.array(tracked_colormap_flags)

        self.filenames_and_nparts = filenames_and_nparts

        ## TODO how do these interface with javascript code?
        self.radiusFunction = None
        self.weightFunction = None

        ## setup the options for this particleGroup 
        self.options_default = {
            'UIparticle':True,
            'UIdropdown':True,
            'UIcolorPicker':True,
            'color': np.append(np.random.random(3),[1]),
            'sizeMult':1.,
            'showParts':True,
            'filterVals':dict(),
            'filterLims':dict(),
            'colormapVals':dict(),
            'colormapLims':dict(),
            'colormap':1./64,
            'colormapVariable':0,
            'showColormap':False,
            'showVel':False,
            'plotNmax':None,
            'velType':None
        }
        
        ## setup default values for the initial filter limits (vals/lims represent the interactive
        ##  "displayed" particles and the available boundaries for the limits)
        for tracked_name,tracked_filter_flag in zip(self.tracked_names,self.tracked_filter_flags):
            if tracked_filter_flag:
                self.options_default['filterVals'][tracked_name] = None
                self.options_default['filterLims'][tracked_name] = None

        ## setup default values for the initial color limits (vals/lims represent the interactive
        ##  "displayed" particles and the available boundaries for the limits)
        for tracked_name,tracked_colormap_flag in zip(self.tracked_names,self.tracked_colormap_flags):
            if tracked_colormap_flag:
                self.options_default['colormapVals'][tracked_name] = None
                self.options_default['colormapLims'][tracked_name] = None
        
        ## now let the user overwrite the defaults if they'd like (e.g. the color,
        ##  likely the most popular option users will want to change)
        for option_kwarg in option_kwargs:
            if option_kwarg in self.options_default.keys():
                if option_kwarg == 'color':
                    try:
                        assert len(option_kwargs[option_kwarg]) == 4
                    except AssertionError:
                        raise ValueError("Make sure you pass the color as an RGBA array")
                        
                self.options_default[option_kwarg] = option_kwargs[option_kwarg]
            else:
                raise KeyError("Invalid option kwarg: %s"%option_kwarg)
        self.linked_options = linked_options
        self.doSPHrad = doSPHrad
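A minimal construction sketch for the interface documented above; the particle
positions and the tracked array are randomly generated stand-ins:

    import numpy as np
    from firefly_api.reader import ParticleGroup

    nparts = 10**5
    coords = np.random.randn(nparts, 3)   ## fake (nparts,3) positions
    density = np.random.random(nparts)    ## fake scalar field, one value per particle

    particleGroup = ParticleGroup(
        'Gas',                            ## short name for the UI
        coords,
        tracked_arrays=[density],
        tracked_names=['Density'],
        tracked_filter_flags=[True],      ## filterable in the UI
        tracked_colormap_flags=[True],    ## colorable in the UI
        decimation_factor=10,             ## show 1 in 10 particles
        color=[1., 0., 0., 1.])           ## option kwarg: RGBA red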
Example #4
    def outputToJSON(
        self,
        path, ## sub-directory name
        path_prefix, ## absolute path to Firefly/data
        prefix, ## prefix of JSON filename
        loud=1,
        nparts_per_file=10**4,
        clean=0,
        write_jsons_to_disk=True):
        """
        Outputs this ParticleGroup instance's data to JSON format, best used when coupled with a Reader
        instance's dumpToJSON method. 
        Input:
            path - the name of the sub-directory of Firefly/data you want to put these files into
            path_prefix - the the path to Firefly/data
            prefix - the string you want to prepend to the data JSONs
            loud=1 - flag to print warnings that you should hear if you're not using a
                reader that does these things for you
            nparts_per_file=10**4 - maximum number of particles per JSON file
            clean=0 - flag for whether the JSON directory should be purged before writing your files.
         """

        ## shuffle particles and decimate as necessary, save the output in dec_inds
        self.getDecimationIndexArray()

        ## where are we saving this json to?
        full_path = os.path.join( path_prefix, path )
        if not os.path.isdir(full_path):
            os.makedirs(full_path)
        if loud:
            FireflyMessage(
                "You will need to add the sub-filenames to"+
                " filenames.json if this was not called by a Reader instance.")
            FireflyMessage("Writing:",self,"JSON to %s"%full_path)

        ## do we want to delete any existing jsons here?
        if clean:
            warnings.warn(FireflyWarning("Removing old JSON files from %s"%full_path))
            for fname in os.listdir(full_path):
                if "json" in fname:
                    os.remove(os.path.join(full_path,fname))

        ## if the user did not specify how we should partition the data between
        ##  sub-JSON files then we'll just do it equally
        if self.filenames_and_nparts is None:
            ## determine if we were passed a boolean mask or a index array
            if self.dec_inds.dtype == bool:
                nparts = np.sum(self.dec_inds)
                self.dec_inds = np.argwhere(self.dec_inds) ## convert to an index array
            else:
                nparts = self.dec_inds.shape[0]

            ## how many sub-files are we going to need?
            nfiles = int(nparts/nparts_per_file + ((nparts%nparts_per_file)!=0))

            ## how many particles will each file have and what are they named?
            filenames = [os.path.join(path,"%s%s%03d.json"%(prefix,self.UIname,i_file)) for i_file in range(nfiles)]
            ## every file gets nparts_per_file except the last, which takes the remainder
            nparts_per_file_list = [min(nparts_per_file,nparts-i_file*nparts_per_file) for i_file in range(nfiles)]

            self.filenames_and_nparts = list(zip(filenames,nparts_per_file_list))
        
        JSON_array = []
        ## loop through the sub-files
        cur_index = 0
        for i_file,(fname,nparts_this_file) in enumerate(self.filenames_and_nparts):
            ## pick out the indices for this file
            if self.decimation_factor > 1:
                these_dec_inds = self.dec_inds[cur_index:cur_index+nparts_this_file]
            else:
                ## create a dummy index array that takes everything
                these_dec_inds = np.arange(cur_index,cur_index+nparts_this_file)
        
            ## format an output dictionary
            outDict = self.outputToDict(these_dec_inds, i_file)

            fname = os.path.join(path_prefix,fname)

            JSON_array += [(
                fname,
                write_to_json(outDict,
                    fname if write_jsons_to_disk else None))] ## path=None -> returns a string

            ## move onto the next file
            cur_index += nparts_this_file
        
        return JSON_array
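The partitioning above is a ceiling division followed by a last-file remainder;
a standalone sketch of the arithmetic with a made-up particle count:

    nparts = 25000
    nparts_per_file = 10**4

    ## ceiling division: one extra file whenever there is a remainder
    nfiles = int(nparts/nparts_per_file + ((nparts % nparts_per_file) != 0))

    ## every file holds nparts_per_file particles except the last,
    ##  which takes whatever is left over
    counts = [min(nparts_per_file, nparts - i*nparts_per_file) for i in range(nfiles)]
    print(nfiles, counts)  ## -> 3 [10000, 10000, 5000]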
Example #5
    def __init__(
            self,
            snapdir,  # directory that contains all the hdf5 data files
            snapnum,  # which snapnumber to open
            ptypes=None,  # which particle types to extract
            UInames=None,  # what those particle types will be called in the UI
            decimation_factors=None,  # factor by which to decimate the particle types by
            returnKeys=None,  # which things to read from the simulation
            filterFlags=None,  # flags whether we should filter by that returnKey
            colormapFlags=None,  # flags whether we should color by that returnKey
            doMags=None,  # flags for whether we should take the magnitude of that returnKey
            doLogs=None,  # flags for whether we should take the log of that returnKey
            ## arguments from Reader
            JSONdir=None,  ## abs path, must be a sub-directory of Firefly/data
            write_startup='append',  # True -> write | False -> leave alone | "append" -> adds to existing file
            max_npart_per_file=10**4,
            prefix='FIREData',
            clean_JSONdir=0,
            options=None,
            tweenParams=None):
        """
        snapdir - string, directory that contains all the hdf5 data files
        snapnum - integer, which snapshot to open
        ptypes=None - list of strings, which particle types to extract (e.g. 'PartType0', 'PartType1')
        UInames=None - list of strings, what the particle groups should be called in the UI
        decimation_factors=None - list of integers, by what factor each dataset should be subsampled
        returnKeys=None - list of strings, which arrays from the snapshot we should extract,
            do not include 'Coordinates'
        filterFlags=None - list of booleans, of those, which should be "filterable"
        colormapFlags=None - list of booleans, of those, which should be "colorable"
        doMags=None - list of booleans, of those, which should have their magnitude taken (e.g. velocity)
        doLogs=None - list of booleans, of those, which should have their log10 taken (e.g. density)

        ------ inherited from Reader ------
        `JSONdir=None` - This should be the name of the sub-directory that will
            contain your JSON files, if you are not running python from
            `/path/to/Firefly/data` it should be the absolute path.

        `options=None` - An `Options` instance, if you have created one you can
            pass it here. `None` will generate default options. `reader.options.listKeys()`
            will give you a list of the different available options you can set
            using `reader.options["option_name"] = option_value`. 

        `tweenParams=None` - a tweenParams instance for automating a fly-through
            path by pressing `t` while within an open instance of Firefly.

        `write_startup='append'` - This is a flag for whether the `startup.json` file
            should be written. It has 3 values: `True` -> writes a new `startup.json`
            that will contain only this visualization; `'append'` (the default) ->
            adds this visualization to an existing `startup.json` (or creates a
            new one); `False` -> does not add an entry to `startup.json`.

        `max_npart_per_file=10000` - The maximum number of particles saved per file,
            don't use too large a number or you will have trouble loading
            the individual files in. 

        `prefix='Data'` - What you would like your `.json` files to be called when
            you run `reader.dumpToJSON`. The format is
            `(prefix)(particleGroupName)(fileNumber).json`.

        `clean_JSONdir=0` - Whether you would like to delete all `.json` files in
            the `JSONdir`. Usually not necessary (since `filenames.json` will be
            updated) but good to clean up after yourself.
        """

        ## handle default input
        ptypes = [] if ptypes is None else ptypes
        UInames = [] if UInames is None else UInames
        decimation_factors = [] if decimation_factors is None else decimation_factors
        returnKeys = [] if returnKeys is None else returnKeys
        filterFlags = [] if filterFlags is None else filterFlags
        colormapFlags = [] if colormapFlags is None else colormapFlags
        doMags = [] if doMags is None else doMags
        doLogs = [] if doLogs is None else doLogs

        ## input validation
        ##  ptypes
        try:
            lists = [decimation_factors, UInames]
            names = ['decimation_factors', 'UInames']
            for name, llist in zip(names, lists):
                assert len(llist) == len(ptypes)
        except AssertionError:
            raise ValueError("%s is not the same length as ptypes (%d,%d)" %
                             (name, len(llist), len(ptypes)))

        ##  returnKeys
        try:
            lists = [filterFlags, colormapFlags, doMags, doLogs]
            names = ['filterFlags', 'colormapFlags', 'doMags', 'doLogs']
            for name, llist in zip(names, lists):
                assert len(llist) == len(returnKeys)
        except AssertionError:
            raise ValueError(
                "%s is not the same length as returnKeys (%d,%d)" %
                (name, len(llist), len(returnKeys)))

        ##  IO/snapshots
        try:
            assert os.path.isdir(snapdir)
        except AssertionError:
            raise IOError("Cannot find %s" % snapdir)

        ##  this I handle separately
        if 'Coordinates' in returnKeys:
            warnings.warn(
                FireflyWarning(
                    "Do not put Coordinates in returnKeys, removing it (and its flags)..."
                ))
            returnKeys = list(returnKeys)
            filterFlags = list(filterFlags)
            colormapFlags = list(colormapFlags)
            doMags = list(doMags)
            doLogs = list(doLogs)

            index = returnKeys.index('Coordinates')

            ## remove Coordinates and its flags from every per-key list
            for llist in [returnKeys, filterFlags, colormapFlags, doMags, doLogs]:
                llist.pop(index)

        ## where to find the HDF5 files
        self.snapdir = snapdir
        self.snapnum = snapnum

        ## which particles we want to extract
        self.ptypes = ptypes

        ## what do we want to call those particles in the UI
        self.UInames = UInames

        ## do we want to decimate the arrays at all?
        self.decimation_factors = decimation_factors

        ## what attributes do we want to load of that particle type?
        self.returnKeys = returnKeys

        ## do we want to filter on that attribute?
        self.filterFlags = filterFlags

        ## do we want to color by that attribute?
        self.colormapFlags = colormapFlags

        ## do we need to take the magnitude of it? (velocity? typically not..)
        self.doMags = doMags

        ## do we need to take the log of it
        self.doLogs = doLogs

        ####### execute generic Reader __init__ below #######
        super().__init__(JSONdir=JSONdir,
                         write_startup=write_startup,
                         max_npart_per_file=max_npart_per_file,
                         prefix=prefix,
                         options=options,
                         clean_JSONdir=clean_JSONdir,
                         tweenParams=tweenParams)
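A construction sketch that follows the docstring above; the snapshot directory,
snapshot number, and chosen keys are hypothetical and assume FIRE-style hdf5
outputs. Per the docstrings, `reader.dumpToJSON` then writes everything out:

    reader = FIREreader(
        snapdir="/path/to/output",            ## hypothetical snapshot directory
        snapnum=600,
        ptypes=['PartType0'],                 ## gas only
        UInames=['Gas'],
        decimation_factors=[10],
        returnKeys=['Density', 'Velocities'],
        filterFlags=[True, True],
        colormapFlags=[True, False],
        doMags=[False, True],                 ## magnitude of the velocity vector
        doLogs=[True, False],                 ## log10 of the density
        JSONdir="/path/to/Firefly/data/FIREData_600")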
Example #6
import numpy as np
import os

from firefly_api.options import Options
from firefly_api.reader import Reader, ParticleGroup
from firefly_api.errors import FireflyError, FireflyWarning, FireflyMessage, warnings

try:
    ### depends on abg_python, if you're me that's not a problem!
    from abg_python.snapshot_utils import openSnapshot
except ImportError:
    try:
        import snapshot_utils
        warnings.warn(
            FireflyWarning("importing openSnapshot from: {}".format(
                snapshot_utils.__file__)))
        openSnapshot = snapshot_utils.openSnapshot
    except ImportError:
        raise ImportError(
            "snapshot_utils not found, try looking inside Firefly/data " +
            "or use yt to open your gizmo data")


class FIREreader(Reader):
    """
    This is an example of a "custom" Reader that has been tuned to conveniently
    open data from the FIRE galaxy formation collaboration (fire.northwestern.edu).
    You should use this as a primer for building your own custom reader!
    """
    def __init__(
            self,