def _check_for_outputs(self, potential_outputs):
        """
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.", 
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs, 
                                                   storage=my_outputs):
            # Pick the directory and filename prefixes that match this dump
            # type: time-based data dumps or redshift dumps.
            if self.parameters['DataDumpDir'] in output:
                dir_key = self.parameters['DataDumpDir']
                output_key = self.parameters['DataDumpName']
            else:
                dir_key = self.parameters['RedshiftDumpDir']
                output_key = self.parameters['RedshiftDumpName']
            # The dump index is whatever follows the directory prefix, and
            # the dataset file inside shares it: e.g., with DataDumpDir "DD"
            # and DataDumpName "data", ".../DD0043" maps to
            # ".../DD0043/data0043".
            index = output[output.find(dir_key) + len(dir_key):]
            filename = os.path.join(self.parameters['GlobalDir'],
                                    "%s%s" % (dir_key, index),
                                    "%s%s" % (output_key, index))
            if os.path.exists(filename):
                try:
                    ds = load(filename)
                    if ds is not None:
                        my_storage.result = {'filename': filename,
                                             'time': ds.current_time.in_units("s")}
                        if ds.cosmological_simulation:
                            my_storage.result['redshift'] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error('Failed to load %s', filename)
        my_outputs = [my_output for my_output in my_outputs.values()
                      if my_output is not None]

        return my_outputs

    def _find_outputs(self):
        """
        Search for directories matching the data dump keywords.
        If found, get dataset times by opening the ds.
        """

        # look for time outputs.
        potential_time_outputs = glob.glob(
            os.path.join(self.parameters['GlobalDir'],
                         "%s*" % self.parameters['DataDumpDir']))
        self.all_time_outputs = self._check_for_outputs(
            potential_time_outputs)
        self.all_time_outputs.sort(key=lambda obj: obj['time'])

        # look for redshift outputs.
        potential_redshift_outputs = glob.glob(
            os.path.join(self.parameters['GlobalDir'],
                         "%s*" % self.parameters['RedshiftDumpDir']))
        self.all_redshift_outputs = self._check_for_outputs(
            potential_redshift_outputs)
        self.all_redshift_outputs.sort(key=lambda obj: obj['time'])

        self.all_outputs = self.all_time_outputs + self.all_redshift_outputs
        self.all_outputs.sort(key=lambda obj: obj['time'])
        only_on_root(mylog.info, "Located %d total outputs.",
                     len(self.all_outputs))

        # manually set the final time and redshift from the last output
        if self.all_outputs:
            self.final_time = self.all_outputs[-1]['time']
            if self.cosmological_simulation:
                self.final_redshift = self.all_outputs[-1]['redshift']
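
# A minimal usage sketch, assuming yt's public simulation interface (the
# parameter file path is a placeholder, not from the example above). Passing
# find_outputs=True makes the frontend scan the disk via _find_outputs()
# instead of trusting the output list recorded in the parameter file.
import yt

sim = yt.load_simulation("enzo_run/simulation.par", "Enzo", find_outputs=True)
sim.get_time_series()
for ds in sim:
    print(ds.current_time.in_units("Myr"))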
Example #4
    def _check_for_outputs(self, potential_outputs):
        r"""
        Check a list of files to see if they are valid datasets.
        """

        only_on_root(mylog.info, "Checking %d potential outputs.",
                     len(potential_outputs))

        my_outputs = {}
        for my_storage, output in parallel_objects(potential_outputs,
                                                   storage=my_outputs):
            if os.path.exists(output):
                try:
                    ds = load(output)
                    if ds is not None:
                        my_storage.result = {
                            "filename": output,
                            "time": ds.current_time.in_units("s")
                        }
                        if ds.cosmological_simulation:
                            my_storage.result["redshift"] = ds.current_redshift
                except YTOutputNotIdentified:
                    mylog.error("Failed to load %s", output)
        my_outputs = [my_output for my_output in my_outputs.values()
                      if my_output is not None]

        return my_outputs

    def _find_outputs(self):
        """
        Search for snapshot files matching the expected naming pattern.
        If found, get dataset times by opening the ds.
        """

        # _snapshot_format() builds the glob pattern for this simulation's
        # snapshot files.
        potential_outputs = glob.glob(self._snapshot_format())
        self.all_outputs = self._check_for_outputs(potential_outputs)
        self.all_outputs.sort(key=lambda obj: obj["time"])
        only_on_root(mylog.info, "Located %d total outputs.", len(self.all_outputs))

        # manually set the final time and redshift from the last output
        if self.all_outputs:
            self.final_time = self.all_outputs[-1]["time"]
            if self.cosmological_simulation:
                self.final_redshift = self.all_outputs[-1]["redshift"]