Example #1
0
    def save(self, output_file, overwrite=False):
        """Serialize this model to a YAML file on disk.

        :param output_file: path of the file to write
        :param overwrite: if True, silently replace an existing file
        :raises ModelFileExists: if the file exists and overwrite is False
        :raises CannotWriteModel: if the file cannot be written (IOError)
        """

        # Refuse to clobber an existing file unless explicitly allowed
        if os.path.exists(output_file) and overwrite is False:
            raise ModelFileExists(
                "The file %s exists already. If you want to overwrite it, use the 'overwrite=True' "
                "options as 'model.save(\"%s\", overwrite=True)'. " %
                (output_file, output_file))

        # Serialize the model (with type information) and dump it as YAML
        model_dict = self.to_dict_with_types()

        try:
            yaml_text = my_yaml.dump(model_dict, default_flow_style=False)

            with open(output_file, "w+") as out_stream:
                # Double each newline so the entries are visually separated
                out_stream.write(yaml_text.replace("\n", "\n\n"))

        except IOError:
            raise CannotWriteModel(
                os.path.dirname(os.path.abspath(output_file)),
                "Could not write model file %s. Check your permissions to write or the "
                "report on the free space which follows: " % output_file)
def xspec_model_factory(model_name, xspec_function, model_type, definition):
    """Generate (once), cache on disk, and import a wrapper class for an Xspec model.

    The class source is produced by substituting placeholders in
    ``class_definition_code`` and written to ``<user_data_path>/XS_<name>.py``;
    subsequent calls reuse the cached file.

    :param model_name: name of the Xspec model (class will be 'XS_<model_name>')
    :param xspec_function: name of the Xspec function wrapping the model
    :param model_type: 'add' (additive) or 'mul' (multiplicative); 'con'
        (convolution) is rejected
    :param definition: dict describing the model; for additive models a 'norm'
        parameter is appended to definition['parameters']
    :return: tuple (class_name, class_object)
    """
    # Local import: only needed here, and keeps this fix self-contained
    import importlib

    class_name = 'XS_%s' % model_name

    # Get the path to the user data directory
    user_data_path = str(get_user_data_path())

    # Check if the code for this function already exists

    code_file_name = os.path.join(user_data_path, '%s.py' % class_name)

    if os.path.exists(code_file_name):

        # Code already exists
        pass

    else:

        print("Generating code for Xspec model %s..." % model_name)

        # If this is an additive model (model_type == 'add') we need to add
        # one more parameter (normalization)

        if model_type == 'add':

            definition['parameters']['norm'] = {
                'initial value': 1.0,
                'desc':
                '(see https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/'
                'XspecModels.html)',
                'min': 0,
                'max': None,
                'delta': 0.1,
                'unit': 'keV / (cm2 s)',
                'free': True
            }

        assert model_type != 'con', "Convolution models are not yet supported"

        # Get a list of the parameter names
        parameters_names = ", ".join(list(definition['parameters'].keys()))

        # Create the docstring
        docstring = my_yaml.dump(definition, default_flow_style=False)

        # Create the class by substituting in the class_definition_code the
        # relevant things for this model

        code = class_definition_code.replace('$MODEL_NAME$', model_name)
        code = code.replace('$DOCSTRING$', docstring)
        code = code.replace('$PARAMETERS_NAMES$', parameters_names)
        code = code.replace('$XSPEC_FUNCTION$', xspec_function)
        code = code.replace('$MODEL_TYPE$', model_type)

        # Write to the file

        with open(code_file_name, 'w+') as f:

            f.write(
                "# This code has been automatically generated. Do not edit.\n")
            f.write("\n\n%s\n" % code)

        # Give the filesystem a moment so the freshly written module is visible
        # to the import machinery (original behavior preserved)
        time.sleep(1)

    # Add the path to sys.path if it doesn't exist there yet
    if user_data_path not in sys.path:

        sys.path.append(user_data_path)

    # Import the generated module. NOTE: the previous implementation used
    # exec('from X import X') followed by locals()[class_name], which depends
    # on CPython implementation details (exec cannot reliably create function
    # locals). importlib is the documented, portable way to do a dynamic import.
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        generated_module = importlib.import_module(class_name)

    # Return the class we just created
    return class_name, getattr(generated_module, class_name)
Example #3
0
    def __init__(self, analysis_results):
        """Build the FITS extension holding the results of an analysis.

        Serializes the optimized model, the per-parameter values/errors, the
        covariance matrix (MLE) or posterior samples (Bayesian), plus per-plugin
        statistic values and statistical measures as header keywords.

        :param analysis_results: an MLEResults or BayesianResults instance
        """

        optimized_model = analysis_results.optimized_model

        # Gather the dictionary with free parameters

        free_parameters = optimized_model.free_parameters

        n_parameters = len(free_parameters)

        # Gather covariance matrix (if any)

        if analysis_results.analysis_type == "MLE":

            assert isinstance(analysis_results, MLEResults)

            covariance_matrix = analysis_results.covariance_matrix

            # Check that the covariance matrix has the right shape

            assert covariance_matrix.shape == (n_parameters, n_parameters), \
                "Matrix has the wrong shape. Should be %i x %i, got %i x %i" % (n_parameters, n_parameters,
                                                                                covariance_matrix.shape[0],
                                                                                covariance_matrix.shape[1])

            # Empty samples set (MLE has no posterior samples)
            samples = np.zeros(n_parameters)

        else:

            assert isinstance(analysis_results, BayesianResults)

            # Empty covariance matrix (not defined for Bayesian results)

            covariance_matrix = np.zeros(n_parameters)

            # Gather the samples
            samples = analysis_results._samples_transposed

        # Serialize the model so it can be placed in the header

        yaml_model_serialization = my_yaml.dump(
            optimized_model.to_dict_with_types())

        # Replace characters which cannot be contained in a FITS header with other characters
        yaml_model_serialization = _escape_yaml_for_fits(
            yaml_model_serialization)

        # Get data frame with parameters (always use equal tail errors)

        data_frame = analysis_results.get_data_frame(error_type="equal tail")

        # Prepare columns

        data_tuple = [('NAME', free_parameters.keys()),
                      ('VALUE', data_frame['value'].values),
                      ('NEGATIVE_ERROR', data_frame['negative_error'].values),
                      ('POSITIVE_ERROR', data_frame['positive_error'].values),
                      ('ERROR', data_frame['error'].values),
                      ('UNIT', np.array(data_frame['unit'].values, str)),
                      ('COVARIANCE', covariance_matrix), ('SAMPLES', samples)]

        # Init FITS extension

        super(ANALYSIS_RESULTS, self).__init__(data_tuple,
                                               self._HEADER_KEYWORDS)

        # Update keywords with their values for this instance
        self.hdu.header.set("MODEL", yaml_model_serialization)
        self.hdu.header.set("RESUTYPE", analysis_results.analysis_type)

        # Now add two keywords for each instrument.
        # NOTE: Series.iteritems() was removed in pandas 2.0; .items() is the
        # supported equivalent and behaves identically.
        stat_series = analysis_results.optimal_statistic_values  # type: pd.Series

        for i, (plugin_instance_name,
                stat_value) in enumerate(stat_series.items()):
            self.hdu.header.set("STAT%i" % i,
                                stat_value,
                                comment="Stat. value for plugin %i" % i)
            self.hdu.header.set("PN%i" % i,
                                plugin_instance_name,
                                comment="Name of plugin %i" % i)

        # Now add the statistical measures

        measure_series = analysis_results.statistical_measures  # type: pd.Series

        for i, (measure,
                measure_value) in enumerate(measure_series.items()):
            self.hdu.header.set("MEAS%i" % i,
                                measure,
                                comment="Measure type %i" % i)
            self.hdu.header.set("MV%i" % i,
                                measure_value,
                                comment="Measure value %i" % i)
    def _parse_point_source(self, pts_source_definition):
        """Build a PointSource from its dictionary (deserialized YAML) definition.

        :param pts_source_definition: dict which must contain a 'position' key
            (parsed by _parse_sky_direction) and a 'spectrum' key mapping
            component names to component definitions
        :return: a point_source.PointSource instance
        :raises ModelSyntaxError: if 'position' or 'spectrum' is missing
        """

        # Parse the positional information

        try:

            position_definition = pts_source_definition['position']

        except KeyError:  # pragma: no cover

            raise ModelSyntaxError(
                "Point source %s is missing the 'position' attribute" %
                self._source_name)

        this_sky_direction = self._parse_sky_direction(position_definition)

        # Parse the spectral information

        try:

            spectrum = pts_source_definition['spectrum']

        except KeyError:  # pragma: no cover

            raise ModelSyntaxError(
                "Point source %s is missing the 'spectrum' attribute" %
                self._source_name)

        # Parse each spectral component. Any exception from the component
        # parser propagates unchanged. (The previous version wrapped these
        # calls in try/except blocks containing a bare `raise` followed by
        # unreachable `raise RuntimeError(...)` dead code; the wrappers were
        # no-ops and have been removed.)
        components = []

        for component_name, component_definition in spectrum.items():

            this_component = self._parse_spectral_component(
                component_name, component_definition)

            components.append(this_component)

        this_point_source = point_source.PointSource(
            self._source_name,
            sky_position=this_sky_direction,
            components=components)

        return this_point_source