Example #1
    def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
        time_0 = density_dict.pop('time_0', 19.9999584)
        if isinstance(time_0, basestring):
            time_0 = parse_quantity(time_0).to('s').value
        else:
            logger.debug('time_0 not supplied for density exponential - using sensible default %g', time_0)
        try:
            rho_0 = density_dict.pop('rho_0')
            if isinstance(rho_0, basestring):
                rho_0 = parse_quantity(rho_0).to('g/cm^3').value
            else:
                raise KeyError
        except KeyError:
            rho_0 = 1e-2
            logger.warning('rho_0 was not given in the config! Using %g', rho_0)
        try:
            v_0 = density_dict.pop('v_0')
            if isinstance(v_0, basestring):
                v_0 = parse_quantity(v_0).to('km/s').value
            
        except KeyError:
            v_0 = 1
            logger.warning('v_0 was not given in the config file! Using %f km/s', v_0)

        velocities = 0.5 * (v_inner + v_outer)
        densities = calc_exponential_density(velocities, v_0, rho_0)
        densities = u.Quantity(densities, 'g/cm^3')
        return densities
Example #2
    def parse_power_law(density_dict, v_inner, v_outer, time_explosion):
        time_0 = density_dict.pop('time_0', 19.9999584)
        if isinstance(time_0, basestring):
            time_0 = parse_quantity(time_0).to('s')
        else:
            logger.debug('time_0 not supplied for density powerlaw - using sensible default %g', time_0)
            time_0 = time_0 * u.s  # assume seconds when only a bare number is given
        try:
            rho_0 = density_dict.pop('rho_0')
            if isinstance(rho_0, basestring):
                rho_0 = parse_quantity(rho_0)
            else:
                raise KeyError
        except KeyError:
            rho_0 = parse_quantity('1e-2 g/cm^3')
            logger.warning('rho_0 was not given in the config! Using %s', rho_0)
        try:
            exponent = density_dict.pop('exponent')
        except KeyError:
            exponent = 2
            logger.warning('exponent was not given in the config file! Using %f', exponent)
        try:
            v_0 = density_dict.pop('v_0')
            if isinstance(v_0, basestring):
                v_0 = parse_quantity(v_0).to('cm/s')
        except KeyError:
            v_0 = parse_quantity('1 cm/s')
            logger.warning('v_0 was not given in the config file! Using %s', v_0)

        velocities = 0.5 * (v_inner + v_outer)
        densities = calc_power_law_density(velocities, v_0, rho_0, exponent)
        densities = calculate_density_after_time(densities, time_0, time_explosion)
        return densities
Example #3
def parse_quantity_linspace(quantity_linspace_dictionary, add_one=True):
    """
    parse a dictionary of the following kind:
    {'start': '5000 km/s',
     'stop': '10000 km/s',
     'num': 1000}

    Parameters
    ----------

    quantity_linspace_dictionary: ~dict

    add_one: boolean, default: True

    Returns
    -------

    ~astropy.units.Quantity

    """

    start = parse_quantity(quantity_linspace_dictionary['start'])
    stop = parse_quantity(quantity_linspace_dictionary['stop'])

    try:
        stop = stop.to(start.unit)
    except u.UnitsError:
        raise ConfigurationError('"start" and "stop" keyword must be compatible quantities')

    num = quantity_linspace_dictionary['num']
    if add_one:
        num += 1

    return np.linspace(start.value, stop.value, num=num) * start.unit
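
A brief usage sketch for the function above (values are illustrative; it assumes the numpy/astropy imports and parse_quantity used throughout this module):

    velocity_section = {'start': '1.1e4 km/s', 'stop': '2e4 km/s', 'num': 20}
    # With add_one=True (the default) this yields num + 1 = 21 grid points,
    # returned as a quantity array in the unit of 'start' (km/s here).
    velocity_grid = parse_quantity_linspace(velocity_section)
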
Example #4
    def test_spectrum_section(self):
        assert_almost_equal(self.config['spectrum']['start'].value,
                            parse_quantity(self.yaml_data['spectrum']['start']).value)
        assert_almost_equal(self.config['spectrum']['end'].value,
                            parse_quantity(self.yaml_data['spectrum']['stop']).value)

        assert self.config['spectrum']['bins'] == self.yaml_data['spectrum']['num']
Example #5
    def test_spectrum_section(self):
        assert_almost_equal(self.config['spectrum']['start'],
                            parse_quantity(self.yaml_data['spectrum']['start']))
        assert_almost_equal(self.config['spectrum']['end'],
                            parse_quantity(self.yaml_data['spectrum']['stop']))

        assert self.config['spectrum']['bins'] == self.yaml_data['spectrum']['num']
Example #6
def parse_spectral_bin(spectral_bin_boundary_1, spectral_bin_boundary_2):
    spectral_bin_boundary_1 = parse_quantity(spectral_bin_boundary_1).to('Angstrom', u.spectral())
    spectral_bin_boundary_2 = parse_quantity(spectral_bin_boundary_2).to('Angstrom', u.spectral())

    spectrum_start_wavelength = min(spectral_bin_boundary_1, spectral_bin_boundary_2)
    spectrum_end_wavelength = max(spectral_bin_boundary_1, spectral_bin_boundary_2)

    return spectrum_start_wavelength, spectrum_end_wavelength
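
A short usage sketch for parse_spectral_bin (values are illustrative): the two boundaries may be given in any spectrally convertible unit and in either order; both are converted to Angstrom via the u.spectral() equivalency and returned sorted as (start, end):

    spectrum_start, spectrum_end = parse_spectral_bin('500 angstrom', '20000 angstrom')
    # Frequency input also works because of the spectral equivalency:
    spectrum_start, spectrum_end = parse_spectral_bin('1.5e15 Hz', '2e14 Hz')
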
Example #7
    def parse_artis_density(density_file_dict, time_explosion):
        density_file = density_file_dict["name"]
        for i, line in enumerate(file(density_file)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), "day").to("s")
            elif i == 2:
                break

        velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        # converting densities from log(g/cm^3) to g/cm^3 and stretching them to the current time
        velocities = u.Quantity(np.append([0], velocities), "km/s").to("cm/s")
        mean_densities_0 = u.Quantity(10 ** mean_densities_0, "g/cm^3")

        mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)

        # Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug("Verified ARTIS file %s (no_of_shells=length of dataset)", density_file)
        else:
            raise ConfigurationError(
                "Error in ARTIS file %s - Number of shells not the same as dataset length" % density_file
            )

        min_shell = 1
        max_shell = no_of_shells

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model ** 3) * (v_outer ** 3 - v_inner ** 3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info(
            "Read ARTIS configuration file %s - found %d zones with total mass %g Msun",
            density_file,
            no_of_shells,
            sum(masses.value),
        )

        if "v_lowest" in density_file_dict:
            v_lowest = parse_quantity(density_file_dict["v_lowest"]).to("cm/s").value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if "v_highest" in density_file_dict:
            v_highest = parse_quantity(density_file_dict["v_highest"]).to("cm/s").value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells

        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        return v_inner, v_outer, mean_densities, min_shell, max_shell
Example #8
    def parse_artis_density(density_file_dict, time_explosion):
        density_file = density_file_dict['name']
        for i, line in enumerate(file(density_file)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

        velocities, mean_densities_0 = np.recfromtxt(density_file, skip_header=2, usecols=(1, 2), unpack=True)
        #converting densities from log(g/cm^3) to g/cm^3 and stretching them to the current time
        velocities = u.Quantity(np.append([0], velocities), 'km/s').to('cm/s')
        mean_densities_0 = u.Quantity(10 ** mean_densities_0, 'g/cm^3')

        mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)


        #Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug('Verified ARTIS file %s (no_of_shells=length of dataset)', density_file)
        else:
            raise ConfigurationError(
                'Error in ARTIS file %s - Number of shells not the same as dataset length' % density_file)

        min_shell = 1
        max_shell = no_of_shells

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', density_file,
                    no_of_shells, sum(masses.value))

        if 'v_lowest' in density_file_dict:
            v_lowest = parse_quantity(density_file_dict['v_lowest']).to('cm/s').value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if 'v_highest' in density_file_dict:
            v_highest = parse_quantity(density_file_dict['v_highest']).to('cm/s').value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells

        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        return v_inner, v_outer, mean_densities, min_shell, max_shell
Example #9
 def test_velocities(self):
     assert_almost_equal(
         parse_quantity(self.yaml_data['model']['structure']['velocity']
                        ['start']).cgs.value,
         self.config.structure.v_inner[0].cgs.value)
     assert_almost_equal(
         parse_quantity(self.yaml_data['model']['structure']['velocity']
                        ['stop']).cgs.value,
         self.config.structure.v_outer[-1].cgs.value)
     assert len(self.config.structure.v_outer) == (
         self.yaml_data['model']['structure']['velocity']['num'])
Example #10
def parse_supernova_section(supernova_dict):
    """
    Parse the supernova section

    Parameters
    ----------

    supernova_dict: dict
        YAML parsed supernova dict

    Returns
    -------

    config_dict: dict

    """
    config_dict = {}

    #parse luminosity
    luminosity_value, luminosity_unit = supernova_dict[
        'luminosity_requested'].strip().split()

    if luminosity_unit == 'log_lsun':
        config_dict['luminosity_requested'] = 10**(
            float(luminosity_value) +
            np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
    else:
        config_dict['luminosity_requested'] = (
            float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')

    config_dict['time_explosion'] = parse_quantity(
        supernova_dict['time_explosion']).to('s')

    if 'distance' in supernova_dict:
        config_dict['distance'] = parse_quantity(supernova_dict['distance'])
    else:
        config_dict['distance'] = None

    if 'luminosity_wavelength_start' in supernova_dict:
        config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
            to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_end'] = np.inf * u.Hz

    if 'luminosity_wavelength_end' in supernova_dict:
        config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
            to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_start'] = 0.0 * u.Hz

    return config_dict
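
A minimal usage sketch for parse_supernova_section (hypothetical values, mirroring the keys the function reads; 'log_lsun' is the special-cased luminosity unit handled above):

    supernova_dict = {'luminosity_requested': '9.44 log_lsun',
                      'time_explosion': '13 day',
                      'luminosity_wavelength_start': '3000 angstrom',
                      'luminosity_wavelength_end': '9000 angstrom'}
    supernova_config = parse_supernova_section(supernova_dict)
    # luminosity_requested comes back in erg/s, time_explosion in s, and the
    # wavelength limits are converted to luminosity_nu_start/luminosity_nu_end.
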
Example #11
def parse_supernova_section(supernova_dict):
    """
    Parse the supernova section

    Parameters
    ----------

    supernova_dict: dict
        YAML parsed supernova dict

    Returns
    -------

    config_dict: dict

    """
    config_dict = {}

    # parse luminosity
    luminosity_value, luminosity_unit = supernova_dict["luminosity_requested"].strip().split()

    if luminosity_unit == "log_lsun":
        config_dict["luminosity_requested"] = (
            10 ** (float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
        )
    else:
        config_dict["luminosity_requested"] = (float(luminosity_value) * u.Unit(luminosity_unit)).to("erg/s")

    config_dict["time_explosion"] = parse_quantity(supernova_dict["time_explosion"]).to("s")

    if "distance" in supernova_dict:
        config_dict["distance"] = parse_quantity(supernova_dict["distance"])
    else:
        config_dict["distance"] = None

    if "luminosity_wavelength_start" in supernova_dict:
        config_dict["luminosity_nu_end"] = parse_quantity(supernova_dict["luminosity_wavelength_start"]).to(
            "Hz", u.spectral()
        )
    else:
        config_dict["luminosity_nu_end"] = np.inf * u.Hz

    if "luminosity_wavelength_end" in supernova_dict:
        config_dict["luminosity_nu_start"] = parse_quantity(supernova_dict["luminosity_wavelength_end"]).to(
            "Hz", u.spectral()
        )
    else:
        config_dict["luminosity_nu_start"] = 0.0 * u.Hz

    return config_dict
Example #12
def parse_supernova_section(supernova_dict):
    """
    Parse the supernova section

    Parameters
    ----------

    supernova_dict: dict
        YAML parsed supernova dict

    Returns
    -------

    config_dict: dict

    """
    config_dict = {}

    #parse luminosity
    luminosity_value, luminosity_unit = supernova_dict['luminosity_requested'].strip().split()

    if luminosity_unit == 'log_lsun':
        config_dict['luminosity_requested'] = 10 ** (
        float(luminosity_value) + np.log10(constants.L_sun.cgs.value)) * u.erg / u.s
    else:
        config_dict['luminosity_requested'] = (float(luminosity_value) * u.Unit(luminosity_unit)).to('erg/s')

    config_dict['time_explosion'] = parse_quantity(supernova_dict['time_explosion']).to('s')

    if 'distance' in supernova_dict:
        config_dict['distance'] = parse_quantity(supernova_dict['distance'])
    else:
        config_dict['distance'] = None

    if 'luminosity_wavelength_start' in supernova_dict:
        config_dict['luminosity_nu_end'] = parse_quantity(supernova_dict['luminosity_wavelength_start']). \
            to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_end'] = np.inf * u.Hz

    if 'luminosity_wavelength_end' in supernova_dict:
        config_dict['luminosity_nu_start'] = parse_quantity(supernova_dict['luminosity_wavelength_end']). \
            to('Hz', u.spectral())
    else:
        config_dict['luminosity_nu_start'] = 0.0 * u.Hz

    return config_dict
Example #13
def test_parse_quantity():
    q1 = parse_quantity('5 km/s')
    assert q1.value == 5.
    assert q1.unit == u.Unit('km/s')

    with pytest.raises(MalformedQuantityError):
        parse_quantity(5)

    with pytest.raises(MalformedQuantityError):
        parse_quantity('abcd')

    with pytest.raises(MalformedQuantityError):
        parse_quantity('a abcd')

    with pytest.raises(MalformedQuantityError):
        parse_quantity('5 abcd')
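
The tests above constrain the behaviour of parse_quantity. A minimal sketch consistent with them (not necessarily the project's actual implementation; it assumes MalformedQuantityError is importable from the project's exception definitions):

    from astropy import units as u

    def parse_quantity(quantity_string):
        # Reject anything that is not a string, e.g. parse_quantity(5).
        if not isinstance(quantity_string, basestring):
            raise MalformedQuantityError(quantity_string)
        try:
            value_string, unit_string = quantity_string.split()
        except ValueError:
            raise MalformedQuantityError(quantity_string)
        try:
            value = float(value_string)
        except ValueError:
            raise MalformedQuantityError(quantity_string)
        try:
            return u.Quantity(value, unit_string)
        except ValueError:
            # Unknown unit strings such as '5 abcd' end up here.
            raise MalformedQuantityError(quantity_string)
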
Example #14
def test_parse_quantity():
    q1 = parse_quantity('5 km/s')
    assert q1.value == 5.
    assert q1.unit == u.Unit('km/s')

    with pytest.raises(MalformedQuantityError):
        parse_quantity(5)

    with pytest.raises(MalformedQuantityError):
        parse_quantity('abcd')

    with pytest.raises(MalformedQuantityError):
        parse_quantity('a abcd')

    with pytest.raises(MalformedQuantityError):
        parse_quantity('5 abcd')
Example #15
def read_cmfgen_density(fname):
    """
    Read a density file of the following structure (example; lines starting with a hash will be ignored):
    The first density describes the mean density in the center of the model and is not used.
    The file consists of a header row, and the next row contains the units of the respective attributes
    velocity densities electron_densities temperature
    km/s g/cm^3 /cm^3 K
    871.66905 4.2537191e-09 2.5953807e+14 7.6395577
    877.44269 4.2537191e-09 2.5953807e+14 7.6395577

    Any remaining columns contain abundances of elements and isotopes

    Parameters
    ----------

    fname: str
        filename or path with filename


    Returns
    -------

    time_of_model: ~astropy.units.Quantity
        time at which the model is valid

    velocity: ~np.ndarray
    mean_density: ~np.ndarray
    electron_densities: ~np.ndarray
    temperature: ~np.ndarray

    """
    warnings.warn("The current CMFGEN model parser is deprecated",
                  DeprecationWarning)

    df = pd.read_csv(fname, comment='#', delimiter=r'\s+', skiprows=[0, 2])

    with open(fname) as fh:
        for row_index, line in enumerate(fh):
            if row_index == 0:
                time_of_model_string = line.strip().replace('t0:', '')
                time_of_model = parse_quantity(time_of_model_string)
            elif row_index == 2:
                quantities = line.split()

    velocity = u.Quantity(df['velocity'].values, quantities[0]).to('cm/s')
    temperature = u.Quantity(df['temperature'].values, quantities[1])[1:]
    mean_density = u.Quantity(df['densities'].values, quantities[2])[1:]
    electron_densities = u.Quantity(df['electron_densities'].values,
                                    quantities[3])[1:]

    return time_of_model, velocity, mean_density, electron_densities, temperature
Example #16
    def parse_branch85(density_dict, v_inner, v_outer, time_explosion):

        time_0 = density_dict.pop('time_0', 19.9999584)
        if isinstance(time_0, basestring):
            time_0 = parse_quantity(time_0).to('s')
        else:
            time_0 *= u.s
            logger.debug('time_0 not supplied for density branch85 - using sensible default %s', time_0)

        density_coefficient = density_dict.pop('density_coefficient', None)
        if density_coefficient is None:
            density_coefficient = 3e29 * u.Unit('g/cm^3')
            logger.debug('density_coefficient not supplied for density type branch85 - using sensible default %s',
                         density_coefficient)
        else:
            density_coefficient = parse_quantity(density_coefficient)

        velocities = 0.5 * (v_inner + v_outer)
        densities = density_coefficient * (velocities.value * 1e-5) ** -7

        densities = calculate_density_after_time(densities, time_0, time_explosion)

        return densities
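
For context, calculate_density_after_time (used here and in several parsers above) rescales a density profile defined at time_0 to time_explosion. Assuming it implements homologous expansion, the densities simply drop with the cube of the elapsed-time ratio; a sketch under that assumption:

    def calculate_density_after_time(densities, time_0, time_explosion):
        # Homologous expansion: shell volumes grow as t^3, so the density
        # scales as (time_explosion / time_0)**-3 (assumed behaviour).
        return densities * (time_explosion / time_0) ** -3
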
Example #17
    def parse_exponential(density_dict, v_inner, v_outer, time_explosion):
        time_0 = density_dict.pop('time_0', 19.9999584)
        if isinstance(time_0, basestring):
            time_0 = parse_quantity(time_0).to('s').value
        else:
            logger.debug('time_0 not supplied for density exponential - using sensible default %g', time_0)
        try:
            rho_0 = float(density_dict.pop('rho_0'))
        except KeyError:
            rho_0 = 1e-2
            logger.warning('rho_0 was not given in the config! Using %g', rho_0)
        try:
            exponent = density_dict.pop('exponent')
        except KeyError:
            exponent = 2
            logger.warning('exponent was not given in the config file! Using %f', exponent)

        velocities = 0.5 * (v_inner + v_outer)
        densities = calculate_exponential_densities(velocities, v_inner[0], rho_0, exponent)

        return densities
Example #18
def read_simple_ascii_density(fname):
    """
    Reading a density file of the following structure (example; lines starting with a hash will be ignored):
    The first density describes the mean density in the center of the model and is not used.
    5 s
    #index velocity [km/s] density [g/cm^3]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------

    fname: str
        filename or path with filename


    Returns
    -------

    time_of_model: ~astropy.units.Quantity
        time at which the model is valid

    index: ~np.ndarray
        shell indices read from the file

    v_inner: ~astropy.units.Quantity
        inner shell boundary velocities (in cm/s)

    v_outer: ~astropy.units.Quantity
        outer shell boundary velocities (in cm/s)

    mean_density: ~astropy.units.Quantity
        mean density of each shell (the central density row is dropped)
    """

    with open(fname) as fh:
        time_of_model_string = fh.readline().strip()
        time_of_model = parse_quantity(time_of_model_string)

    data = recfromtxt(fname,
                      skip_header=1,
                      names=('index', 'velocity', 'density'),
                      dtype=(int, float, float))
    velocity = (data['velocity'] * u.km / u.s).to('cm/s')
    v_inner, v_outer = velocity[:-1], velocity[1:]
    mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]

    return time_of_model, data['index'], v_inner, v_outer, mean_density
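
A short usage sketch, writing the example file from the docstring to disk first (the file name is illustrative):

    with open('simple_density.dat', 'w') as fh:
        fh.write('5 s\n'
                 '#index velocity [km/s] density [g/cm^3]\n'
                 '0 1.1e4 1.6e8\n'
                 '1 1.2e4 1.7e8\n')

    time_of_model, index, v_inner, v_outer, mean_density = read_simple_ascii_density('simple_density.dat')
    # time_of_model is the quantity 5 s; the velocities come back in cm/s and
    # the first (central) density row has been dropped from mean_density.
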
Example #19
def read_simple_ascii_density(fname):
    """
    Reading a density file of the following structure (example; lines starting with a hash will be ignored):
    The first density describes the mean density in the center of the model and is not used.
    5 s
    #index velocity [km/s] density [g/cm^3]
    0 1.1e4 1.6e8
    1 1.2e4 1.7e8

    Parameters
    ----------

    fname: str
        filename or path with filename


    Returns
    -------

    time_of_model: ~astropy.units.Quantity
        time at which the model is valid

    velocity: ~astropy.units.Quantity
        velocities read from the file (converted to cm/s)

    mean_density: ~astropy.units.Quantity
        mean density of each shell (the central density row is dropped)
    """

    with open(fname) as fh:
        time_of_model_string = fh.readline().strip()
        time_of_model = parse_quantity(time_of_model_string)

    data = recfromtxt(fname, skip_header=1,
                      names=('index', 'velocity', 'density'),
                      dtype=(int, float, float))
    velocity = (data['velocity'] * u.km / u.s).to('cm/s')
    mean_density = (data['density'] * u.Unit('g/cm^3'))[1:]

    return time_of_model, velocity, mean_density
Example #20
    def parse_artis_model_setup_files(model_file_section_dict, time_explosion):

        ###### Reading the structure part of the ARTIS file pair
        structure_fname = model_file_section_dict['structure_fname']

        for i, line in enumerate(file(structure_fname)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

        artis_model_columns = [
            'velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction',
            'fe52_fraction', 'cr48_fraction'
        ]
        artis_model = np.recfromtxt(structure_fname,
                                    skip_header=2,
                                    usecols=(1, 2, 4, 5, 6, 7),
                                    unpack=True,
                                    dtype=[(item, np.float64)
                                           for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching them to the current time
        velocities = u.Quantity(np.append([0], artis_model['velocities']),
                                'km/s').to('cm/s')
        mean_densities_0 = u.Quantity(10**artis_model['mean_densities_0'],
                                      'g/cm^3')

        mean_densities = calculate_density_after_time(mean_densities_0,
                                                      time_of_model,
                                                      time_explosion)

        #Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug(
                'Verified ARTIS model structure file %s (no_of_shells=length of dataset)',
                structure_fname)
        else:
            raise ConfigurationError(
                'Error in ARTIS file %s - Number of shells not the same as dataset length'
                % structure_fname)

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model**
                                     3) * (v_outer**3 - v_inner**3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info(
            'Read ARTIS configuration file %s - found %d zones with total mass %g Msun',
            structure_fname, no_of_shells, sum(masses.value))

        if 'v_lowest' in model_file_section_dict:
            v_lowest = parse_quantity(
                model_file_section_dict['v_lowest']).to('cm/s').value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if 'v_highest' in model_file_section_dict:
            v_highest = parse_quantity(
                model_file_section_dict['v_highest']).to('cm/s').value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells
        artis_model = artis_model[min_shell:max_shell]
        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        ###### Reading the abundance part of the ARTIS file pair
        abundances_fname = model_file_section_dict['abundances_fname']
        abundances = pd.DataFrame(
            np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
            index=np.arange(1, 31))

        ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
        co_stable = abundances.ix[27] - artis_model['co56_fraction']
        fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
        mn_stable = abundances.ix[25] - 0.0
        cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
        v_stable = abundances.ix[23] - 0.0
        ti_stable = abundances.ix[22] - 0.0

        abundances.ix[28] = ni_stable
        abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
            -(time_explosion * inv_ni56_efolding_time).to(1).value)

        abundances.ix[27] = co_stable
        abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
            -(time_explosion * inv_co56_efolding_time).to(1).value)
        abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
                              (inv_ni56_efolding_time - inv_co56_efolding_time)) * \
                             (np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_ni56_efolding_time * time_explosion).to(1).value))

        abundances.ix[26] = fe_stable
        abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
            -(time_explosion * inv_fe52_efolding_time).to(1).value)
        abundances.ix[26] += (
            (artis_model['co56_fraction'] * inv_ni56_efolding_time -
             artis_model['co56_fraction'] * inv_co56_efolding_time +
             artis_model['ni56_fraction'] * inv_ni56_efolding_time -
             artis_model['ni56_fraction'] * inv_co56_efolding_time -
             artis_model['co56_fraction'] * inv_ni56_efolding_time *
             np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) +
             artis_model['co56_fraction'] * inv_co56_efolding_time *
             np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) -
             artis_model['ni56_fraction'] * inv_ni56_efolding_time *
             np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) +
             artis_model['ni56_fraction'] * inv_co56_efolding_time *
             np.exp(-(inv_ni56_efolding_time * time_explosion).to(1).value)) /
            (inv_ni56_efolding_time - inv_co56_efolding_time))

        abundances.ix[25] = mn_stable
        abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
                              (inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
                             (np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_fe52_efolding_time * time_explosion).to(1).value))

        abundances.ix[24] = cr_stable
        abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
            -(time_explosion * inv_cr48_efolding_time).to(1).value)
        abundances.ix[24] += (
            (artis_model['fe52_fraction'] * inv_fe52_efolding_time -
             artis_model['fe52_fraction'] * inv_mn52_efolding_time -
             artis_model['fe52_fraction'] * inv_fe52_efolding_time *
             np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) +
             artis_model['fe52_fraction'] * inv_mn52_efolding_time *
             np.exp(-(inv_fe52_efolding_time * time_explosion).to(1).value)) /
            (inv_fe52_efolding_time - inv_mn52_efolding_time))

        abundances.ix[23] = v_stable
        abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
                              (inv_cr48_efolding_time - inv_v48_efolding_time)) * \
                             (np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_cr48_efolding_time * time_explosion).to(1).value))

        abundances.ix[22] = ti_stable
        abundances.ix[22] += (
            (artis_model['cr48_fraction'] * inv_cr48_efolding_time -
             artis_model['cr48_fraction'] * inv_v48_efolding_time -
             artis_model['cr48_fraction'] * inv_cr48_efolding_time *
             np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) +
             artis_model['cr48_fraction'] * inv_v48_efolding_time *
             np.exp(-(inv_cr48_efolding_time * time_explosion).to(1).value)) /
            (inv_cr48_efolding_time - inv_v48_efolding_time))

        if 'split_shells' in model_file_section_dict:
            split_shells = int(model_file_section_dict['split_shells'])
        else:
            split_shells = 1

        if split_shells > 1:
            logger.info('Increasing the number of shells by a factor of %s' %
                        split_shells)
            no_of_shells = len(v_inner)
            velocities = quantity_linspace(v_inner[0], v_outer[-1],
                                           no_of_shells * split_shells + 1)
            v_inner = velocities[:-1]
            v_outer = velocities[1:]
            old_mean_densities = mean_densities
            mean_densities = np.empty(
                no_of_shells * split_shells) * old_mean_densities.unit
            new_abundance_data = np.empty(
                (abundances.values.shape[0], no_of_shells * split_shells))
            for i in xrange(split_shells):
                mean_densities[i::split_shells] = old_mean_densities
                new_abundance_data[:, i::split_shells] = abundances.values

            abundances = pd.DataFrame(new_abundance_data,
                                      index=abundances.index)

            #def parser_simple_ascii_model

        return v_inner, v_outer, mean_densities, abundances
Example #21
def test_quantity_parser_normal():
    q1 = parse_quantity('5 km/s')
    assert q1.value == 5.
    assert q1.unit == u.Unit('km/s')
Example #22
def test_quantity_parser_malformed_quantity2():
    with pytest.raises(MalformedQuantityError):
        q1 = parse_quantity('5 abcd')
Example #23
    def parse_artis_model_setup_files(model_file_section_dict, time_explosion):

        ###### Reading the structure part of the ARTIS file pair
        structure_fname = model_file_section_dict['structure_fname']

        for i, line in enumerate(file(structure_fname)):
            if i == 0:
                no_of_shells = np.int64(line.strip())
            elif i == 1:
                time_of_model = u.Quantity(float(line.strip()), 'day').to('s')
            elif i == 2:
                break

        artis_model_columns = ['velocities', 'mean_densities_0', 'ni56_fraction', 'co56_fraction', 'fe52_fraction',
                               'cr48_fraction']
        artis_model = np.recfromtxt(structure_fname, skip_header=2, usecols=(1, 2, 4, 5, 6, 7), unpack=True,
                                    dtype=[(item, np.float64) for item in artis_model_columns])
        #converting densities from log(g/cm^3) to g/cm^3 and stretching them to the current time
        velocities = u.Quantity(np.append([0], artis_model['velocities']), 'km/s').to('cm/s')
        mean_densities_0 = u.Quantity(10 ** artis_model['mean_densities_0'], 'g/cm^3')

        mean_densities = calculate_density_after_time(mean_densities_0, time_of_model, time_explosion)


        #Verifying information
        if len(mean_densities) == no_of_shells:
            logger.debug('Verified ARTIS model structure file %s (no_of_shells=length of dataset)', structure_fname)
        else:
            raise ConfigurationError(
                'Error in ARTIS file %s - Number of shells not the same as dataset length' % structure_fname)

        v_inner = velocities[:-1]
        v_outer = velocities[1:]

        volumes = (4 * np.pi / 3) * (time_of_model ** 3) * ( v_outer ** 3 - v_inner ** 3)
        masses = (volumes * mean_densities_0 / constants.M_sun).to(1)

        logger.info('Read ARTIS configuration file %s - found %d zones with total mass %g Msun', structure_fname,
                    no_of_shells, sum(masses.value))

        if 'v_lowest' in model_file_section_dict:
            v_lowest = parse_quantity(model_file_section_dict['v_lowest']).to('cm/s').value
            min_shell = v_inner.value.searchsorted(v_lowest)
        else:
            min_shell = 1

        if 'v_highest' in model_file_section_dict:
            v_highest = parse_quantity(model_file_section_dict['v_highest']).to('cm/s').value
            max_shell = v_outer.value.searchsorted(v_highest)
        else:
            max_shell = no_of_shells
        artis_model = artis_model[min_shell:max_shell]
        v_inner = v_inner[min_shell:max_shell]
        v_outer = v_outer[min_shell:max_shell]
        mean_densities = mean_densities[min_shell:max_shell]

        ###### Reading the abundance part of the ARTIS file pair
        abundances_fname = model_file_section_dict['abundances_fname']
        abundances = pd.DataFrame(np.loadtxt(abundances_fname)[min_shell:max_shell, 1:].transpose(),
                                  index=np.arange(1, 31))

        ni_stable = abundances.ix[28] - artis_model['ni56_fraction']
        co_stable = abundances.ix[27] - artis_model['co56_fraction']
        fe_stable = abundances.ix[26] - artis_model['fe52_fraction']
        mn_stable = abundances.ix[25] - 0.0
        cr_stable = abundances.ix[24] - artis_model['cr48_fraction']
        v_stable = abundances.ix[23] - 0.0
        ti_stable = abundances.ix[22] - 0.0

        abundances.ix[28] = ni_stable
        abundances.ix[28] += artis_model['ni56_fraction'] * np.exp(
            -(time_explosion * inv_ni56_efolding_time).to(1).value)

        abundances.ix[27] = co_stable
        abundances.ix[27] += artis_model['co56_fraction'] * np.exp(
            -(time_explosion * inv_co56_efolding_time).to(1).value)
        abundances.ix[27] += (inv_ni56_efolding_time * artis_model['ni56_fraction'] /
                              (inv_ni56_efolding_time - inv_co56_efolding_time)) * \
                             (np.exp(-(inv_co56_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_ni56_efolding_time * time_explosion).to(1).value))

        abundances.ix[26] = fe_stable
        abundances.ix[26] += artis_model['fe52_fraction'] * np.exp(
            -(time_explosion * inv_fe52_efolding_time).to(1).value)
        abundances.ix[26] += ((artis_model['co56_fraction'] * inv_ni56_efolding_time
                               - artis_model['co56_fraction'] * inv_co56_efolding_time
                               + artis_model['ni56_fraction'] * inv_ni56_efolding_time
                               - artis_model['ni56_fraction'] * inv_co56_efolding_time
                               - artis_model['co56_fraction'] * inv_ni56_efolding_time * np.exp(
            -(inv_co56_efolding_time * time_explosion).to(1).value)
                               + artis_model['co56_fraction'] * inv_co56_efolding_time * np.exp(
            -(inv_co56_efolding_time * time_explosion).to(1).value)
                               - artis_model['ni56_fraction'] * inv_ni56_efolding_time * np.exp(
            -(inv_co56_efolding_time * time_explosion).to(1).value)
                               + artis_model['ni56_fraction'] * inv_co56_efolding_time * np.exp(
            -(inv_ni56_efolding_time * time_explosion).to(1).value))
                              / (inv_ni56_efolding_time - inv_co56_efolding_time))

        abundances.ix[25] = mn_stable
        abundances.ix[25] += (inv_fe52_efolding_time * artis_model['fe52_fraction'] /
                              (inv_fe52_efolding_time - inv_mn52_efolding_time)) * \
                             (np.exp(-(inv_mn52_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_fe52_efolding_time * time_explosion).to(1).value))

        abundances.ix[24] = cr_stable
        abundances.ix[24] += artis_model['cr48_fraction'] * np.exp(
            -(time_explosion * inv_cr48_efolding_time).to(1).value)
        abundances.ix[24] += ((artis_model['fe52_fraction'] * inv_fe52_efolding_time
                               - artis_model['fe52_fraction'] * inv_mn52_efolding_time
                               - artis_model['fe52_fraction'] * inv_fe52_efolding_time * np.exp(
            -(inv_mn52_efolding_time * time_explosion).to(1).value)
                               + artis_model['fe52_fraction'] * inv_mn52_efolding_time * np.exp(
            -(inv_fe52_efolding_time * time_explosion).to(1).value))
                              / (inv_fe52_efolding_time - inv_mn52_efolding_time))

        abundances.ix[23] = v_stable
        abundances.ix[23] += (inv_cr48_efolding_time * artis_model['cr48_fraction'] /
                              (inv_cr48_efolding_time - inv_v48_efolding_time)) * \
                             (np.exp(-(inv_v48_efolding_time * time_explosion).to(1).value) - np.exp(
                                 -(inv_cr48_efolding_time * time_explosion).to(1).value))

        abundances.ix[22] = ti_stable
        abundances.ix[22] += ((artis_model['cr48_fraction'] * inv_cr48_efolding_time
                               - artis_model['cr48_fraction'] * inv_v48_efolding_time
                               - artis_model['cr48_fraction'] * inv_cr48_efolding_time * np.exp(
            -(inv_v48_efolding_time * time_explosion).to(1).value)
                               + artis_model['cr48_fraction'] * inv_v48_efolding_time * np.exp(
            -(inv_cr48_efolding_time * time_explosion).to(1).value))
                              / (inv_cr48_efolding_time - inv_v48_efolding_time))

        if 'split_shells' in model_file_section_dict:
            split_shells = int(model_file_section_dict['split_shells'])
        else:
            split_shells = 1

        if split_shells > 1:
            logger.info('Increasing the number of shells by a factor of %s' % split_shells)
            no_of_shells = len(v_inner)
            velocities = np.linspace(v_inner[0], v_outer[-1], no_of_shells * split_shells + 1)
            v_inner = velocities[:-1]
            v_outer = velocities[1:]
            old_mean_densities = mean_densities
            mean_densities = np.empty(no_of_shells * split_shells) * old_mean_densities.unit
            new_abundance_data = np.empty((abundances.values.shape[0], no_of_shells * split_shells))
            for i in xrange(split_shells):
                mean_densities[i::split_shells] = old_mean_densities
                new_abundance_data[:, i::split_shells] = abundances.values

            abundances = pd.DataFrame(new_abundance_data, index=abundances.index)




            #def parser_simple_ascii_model

        return v_inner, v_outer, mean_densities, abundances
Example #24
 def parse_uniform(density_dict, v_inner, v_outer, time_explosion):
     no_of_shells = len(v_inner)
     return parse_quantity(density_dict['value']).to('g cm^-3') * np.ones(no_of_shells)
Example #25
def test_quantity_parser_normal():
    q1 = parse_quantity('5 km/s')
    assert q1.value == 5.
    assert q1.unit == u.Unit('km/s')
Example #26
def test_quantity_parser_malformed_quantity2():
    with pytest.raises(MalformedQuantityError):
        q1 = parse_quantity('5 abcd')
Example #27
 def test_velocities(self):
     assert_almost_equal(parse_quantity(self.yaml_data['model']['structure']['velocity']['start']),
                         self.config.structure.v_inner[0])
     assert_almost_equal(parse_quantity(self.yaml_data['model']['structure']['velocity']['stop']),
                 self.config.structure.v_outer[-1])
     assert len(self.config.structure.v_outer) == (self.yaml_data['model']['structure']['velocity']['num'])
Example #28
    def from_config_dict(cls, raw_dict, atom_data=None, test_parser=False):
        """
        Build a configuration from a YAML-parsed configuration dictionary.

        Parameters
        ----------

        raw_dict : dict
            YAML-parsed configuration dictionary

        atom_data : AtomData, optional
            atomic data object; if not given, it is read from the file named by
            the 'atom_data' key of the configuration

        test_parser : bool, optional
            if True, skip reading atomic data (used when only the parser is tested)

        Returns
        -------

        `tardis.config_reader.TARDISConfiguration`

        """

        config_dict = {}
        raw_dict = copy.deepcopy(raw_dict)

        #First let's see if we can find an atom_db anywhere:
        if test_parser:
            atom_data = None
        elif 'atom_data' in raw_dict.keys():
            atom_data_fname = raw_dict['atom_data']
            config_dict['atom_data_fname'] = atom_data_fname
        else:
            raise ConfigurationError('No atom_data key found in config or command line')



        if atom_data is None and not test_parser:
            logger.info('Reading Atomic Data from %s', atom_data_fname)
            atom_data = atomic.AtomData.from_hdf5(atom_data_fname)
        else:
            atom_data = atom_data



        #Parsing supernova dictionary
        config_dict['supernova'] = parse_supernova_section(raw_dict['supernova'])

        #Parsing the model section
        model_section = raw_dict.pop('model')
        v_inner = None
        v_outer = None
        mean_densities = None
        abundances = None


        if 'file' in model_section:
            v_inner, v_outer, mean_densities, abundances = parse_model_file_section(model_section.pop('file'),
                                                                                    config_dict['supernova']['time_explosion'])
            no_of_shells = len(v_inner)

        structure_config_dict = {}

        if 'structure' in model_section:
        #Trying to figure out the structure (number of shells)

            structure_section = model_section.pop('structure')
            inner_boundary_index, outer_boundary_index = None, None
            try:
                structure_section_type = structure_section['type']
            except KeyError:
                raise ConfigurationError('Structure section requires "type" keyword')


            if structure_section_type == 'specific':
                velocities = parse_quantity_linspace(structure_section['velocity']).to('cm/s')
                v_inner, v_outer = velocities[:-1], velocities[1:]

                mean_densities = parse_density_section(structure_section['density'], v_inner, v_outer,
                                                       config_dict['supernova']['time_explosion'])

            elif structure_section_type == 'file':
                v_inner_boundary, v_outer_boundary = structure_section.get('v_inner_boundary', 0 * u.km/u.s), \
                                                     structure_section.get('v_outer_boundary', np.inf * u.km/u.s)

                if not hasattr(v_inner_boundary, 'unit'):
                    v_inner_boundary = parse_quantity(v_inner_boundary)

                if not hasattr(v_outer_boundary, 'unit'):
                    v_outer_boundary = parse_quantity(v_outer_boundary)

                v_inner, v_outer, mean_densities, inner_boundary_index, outer_boundary_index =\
                    read_density_file(structure_section['filename'], structure_section['filetype'],
                                      config_dict['supernova']['time_explosion'], v_inner_boundary, v_outer_boundary)
        else:
            raise ConfigurationError('structure section required in configuration file')


        r_inner = config_dict['supernova']['time_explosion'] * v_inner
        r_outer = config_dict['supernova']['time_explosion'] * v_outer
        r_middle = 0.5 * (r_inner + r_outer)

        structure_config_dict['v_inner'] = v_inner
        structure_config_dict['v_outer'] = v_outer
        structure_config_dict['mean_densities'] = mean_densities
        no_of_shells = len(v_inner)
        structure_config_dict['no_of_shells'] = no_of_shells
        structure_config_dict['r_inner'] = r_inner
        structure_config_dict['r_outer'] = r_outer
        structure_config_dict['r_middle'] = r_middle
        structure_config_dict['volumes'] = (4. / 3) * np.pi * (r_outer ** 3 - r_inner ** 3)




        config_dict['structure'] = structure_config_dict
        #Now that the structure section is parsed we move on to the abundances



        abundances_section  = model_section.pop('abundances')
        abundances_type = abundances_section.pop('type')

        if abundances_type == 'uniform':
            abundances = pd.DataFrame(columns=np.arange(no_of_shells),
                  index=pd.Index(np.arange(1, 120), name='atomic_number'), dtype=np.float64)

            for element_symbol_string in abundances_section:

                z = element_symbol2atomic_number(element_symbol_string)
                abundances.ix[z] = float(abundances_section[element_symbol_string])

        elif abundances_type == 'file':
            index, abundances = read_abundances_file(abundances_section['filename'], abundances_section['filetype'],
                                                     inner_boundary_index, outer_boundary_index)
            if len(index) != no_of_shells:
                raise ConfigurationError('The abundance file specified does not have the same number of cells '
                                         'as the specified density profile')

        abundances = abundances.replace(np.nan, 0.0)

        abundances = abundances[abundances.sum(axis=1) > 0]

        norm_factor = abundances.sum(axis=0)

        if np.any(np.abs(norm_factor - 1) > 1e-12):
            logger.warning("Abundances have not been normalized to 1. - normalizing")
            abundances /= norm_factor

        config_dict['abundances'] = abundances



        ########### DOING PLASMA SECTION ###############

        plasma_section = raw_dict.pop('plasma')
        plasma_config_dict = {}

        if plasma_section['ionization'] not in ('nebular', 'lte'):
            raise ConfigurationError('ionization only allowed to be "nebular" or "lte"')
        plasma_config_dict['ionization'] = plasma_section['ionization']


        if plasma_section['excitation'] not in ('dilute-lte', 'lte'):
            raise ConfigurationError('excitation only allowed to be "dilute-lte" or "lte"')
        plasma_config_dict['excitation'] = plasma_section['excitation']

        if plasma_section['radiative_rates_type'] not in ('dilute-blackbody', 'detailed'):
            raise ConfigurationError('radiative_rates_type must be either "dilute-blackbody" or "detailed"')
        plasma_config_dict['radiative_rates_type'] = plasma_section['radiative_rates_type']

        if plasma_section['line_interaction_type'] not in ('scatter', 'downbranch', 'macroatom'):
            raise ConfigurationError('line_interaction_type must be either "scatter", "downbranch", or "macroatom"')
        plasma_config_dict['line_interaction_type'] = plasma_section['line_interaction_type']

        if 'w_epsilon' in plasma_section:
            plasma_config_dict['w_epsilon'] = plasma_section['w_epsilon']
        else:
            logger.warn('"w_epsilon" not specified in plasma section - setting it to 1e-10')
            plasma_config_dict['w_epsilon'] = 1e-10

        if 'delta_treatment' in plasma_section:
            plasma_config_dict['delta_treatment'] = plasma_section['delta_treatment']
        else:
            logger.warn('"delta_treatment" not specified in plasma section - defaulting to None')
            plasma_config_dict['delta_treatment'] = None

        if 'initial_t_inner' in plasma_section:
            plasma_config_dict['t_inner'] = parse_quantity(plasma_section['initial_t_inner']).to('K')
        else:
            plasma_config_dict['t_inner'] = (((config_dict['supernova']['luminosity_requested'] / \
                                            (4 * np.pi * r_inner[0]**2 * constants.sigma_sb))**.5)**.5).to('K')
            logger.info('"initial_t_inner" is not specified in the plasma section - '
                        'initializing to %s with given luminosity', plasma_config_dict['t_inner'])

        if 'initial_t_rads' in plasma_section:
            if isinstance(plasma_section['initial_t_rads'], basestring):
                uniform_t_rads = parse_quantity(plasma_section['initial_t_rads'])
                plasma_config_dict['t_rads'] = u.Quantity(np.ones(no_of_shells) * uniform_t_rads.value, u.K)

            elif astropy.utils.isiterable(plasma_section['initial_t_rads']):
                assert len(plasma_section['initial_t_rads']) == no_of_shells
                plasma_config_dict['t_rads'] = u.Quantity(plasma_section['initial_t_rads'], u.K)
        else:
            logger.info('No "initial_t_rads" specified - initializing with 10000 K')

            plasma_config_dict['t_rads'] =  u.Quantity(np.ones(no_of_shells) * 10000., u.K)

        ##### NLTE subsection of Plasma start
        nlte_config_dict = {}
        nlte_species = []
        if 'nlte' in plasma_section:
            nlte_section = plasma_section['nlte']
            if 'species' in nlte_section:
                nlte_species_list = nlte_section.pop('species')
                for species_string in nlte_species_list:
                    nlte_species.append(species_string_to_tuple(species_string))

                nlte_config_dict['species'] = nlte_species
                nlte_config_dict['species_string'] = nlte_species_list
                nlte_config_dict.update(nlte_section)

                if 'coronal_approximation' not in nlte_section:
                    logger.debug('NLTE "coronal_approximation" not specified in NLTE section - defaulting to False')
                    nlte_config_dict['coronal_approximation'] = False

                if 'classical_nebular' not in nlte_section:
                    logger.debug('NLTE "classical_nebular" not specified in NLTE section - defaulting to False')
                    nlte_config_dict['classical_nebular'] = False


            elif nlte_section: #checks that the dictionary is not empty
                logger.warn('No "species" given - ignoring other NLTE options given:\n%s',
                            pp.pformat(nlte_section))

        if not nlte_config_dict:
            nlte_config_dict['species'] = []

        plasma_config_dict['nlte'] = nlte_config_dict



        #^^^^^^^ NLTE subsection of Plasma end

        config_dict['plasma'] = plasma_config_dict


        #^^^^^^^^^^^^^^ End of Plasma Section

        ##### Monte Carlo Section

        montecarlo_section = raw_dict.pop('montecarlo')
        montecarlo_config_dict = {}

        #PARSING convergence section
        convergence_variables = ['t_inner', 't_rad', 'w']
        convergence_config_dict = {}
        if 'convergence_strategy' in montecarlo_section:

            convergence_section = montecarlo_section.pop('convergence_strategy')
            if 'lock_t_inner_cycles' in convergence_section:
                lock_t_inner_cycles = convergence_section['lock_t_inner_cycles']
                logger.info('lock_t_inner_cycles set to %d cycles', lock_t_inner_cycles)
            else:
                lock_t_inner_cycles = None

            if 't_inner_update_exponent' in convergence_section:
                t_inner_update_exponent = convergence_section['t_inner_update_exponent']
                logger.info('t_inner update exponent set to %g', t_inner_update_exponent)
            else:
                t_inner_update_exponent = None

            if convergence_section['type'] == 'damped':
                convergence_config_dict['type'] = 'damped'
                global_damping_constant = convergence_section['damping_constant']

                for convergence_variable in convergence_variables:
                    convergence_parameter_name = convergence_variable
                    current_convergence_parameters = {}
                    convergence_config_dict[convergence_parameter_name] = current_convergence_parameters

                    if convergence_variable in convergence_section:
                        current_convergence_parameters['damping_constant'] \
                            = convergence_section[convergence_variable]['damping_constant']
                    else:
                        current_convergence_parameters['damping_constant'] = global_damping_constant

            elif convergence_section['type'] == 'specific':

                convergence_config_dict['type'] = 'specific'

                global_convergence_parameters = {}
                global_convergence_parameters['damping_constant'] = convergence_section['damping_constant']
                global_convergence_parameters['threshold'] = convergence_section['threshold']

                global_convergence_parameters['fraction'] = convergence_section['fraction']

                for convergence_variable in convergence_variables:
                    convergence_parameter_name = convergence_variable
                    current_convergence_parameters = {}

                    convergence_config_dict[convergence_parameter_name] = current_convergence_parameters
                    if convergence_variable in convergence_section:
                        for param in global_convergence_parameters.keys():
                            if param == 'fraction' and convergence_variable == 't_inner':
                                continue
                            if param in convergence_section[convergence_variable]:
                                current_convergence_parameters[param] = convergence_section[convergence_variable][param]
                            else:
                                current_convergence_parameters[param] = global_convergence_parameters[param]
                    else:
                        convergence_config_dict[convergence_parameter_name] = global_convergence_parameters.copy()

                global_convergence_parameters['hold'] = convergence_section['hold']
                convergence_config_dict['global_convergence_parameters'] = global_convergence_parameters

            else:
                raise ValueError("convergence criteria unclear %s", convergence_section['type'])



        else:
            lock_t_inner_cycles = None
            t_inner_update_exponent = None
            logger.warning('No convergence criteria selected - just damping by 0.5 for w, t_rad and t_inner')
            convergence_config_dict['type'] = 'damped'
            for convergence_variable in convergence_variables:
                convergence_parameter_name = convergence_variable
                convergence_config_dict[convergence_parameter_name] = dict(damping_constant=0.5)
        if lock_t_inner_cycles is None:
            logger.warning('t_inner update lock cycles not set - defaulting to 1')
            lock_t_inner_cycles = 1
        if t_inner_update_exponent is None:
            logger.warning('t_inner update exponent not set - defaulting to -0.5')
            t_inner_update_exponent = -0.5

        convergence_config_dict['lock_t_inner_cycles'] = lock_t_inner_cycles
        convergence_config_dict['t_inner_update_exponent'] = t_inner_update_exponent


        montecarlo_config_dict['convergence'] = convergence_config_dict
        ###### END of convergence section reading

        if 'last_no_of_packets' not in montecarlo_section:
            montecarlo_section['last_no_of_packets'] = None

        if 'no_of_virtual_packets' not in montecarlo_section:
            montecarlo_section['no_of_virtual_packets'] = 0

        montecarlo_config_dict.update(montecarlo_section)

        disable_electron_scattering = plasma_section.get('disable_electron_scattering', False)

        if disable_electron_scattering is False:
            logger.info("Electron scattering switched on")
            montecarlo_config_dict['sigma_thomson'] =6.652486e-25 / (u.cm**2)
        else:
            logger.warn('Disabling electron scattering - this is not physical')
            montecarlo_config_dict['sigma_thomson'] = 1e-200 / (u.cm**2)

        montecarlo_config_dict['enable_reflective_inner_boundary'] = False
        montecarlo_config_dict['inner_boundary_albedo'] = 0.0

        if 'inner_boundary_albedo' in montecarlo_section:
            montecarlo_config_dict['inner_boundary_albedo'] = montecarlo_section['inner_boundary_albedo']
            if 'enable_reflective_inner_boundary' not in montecarlo_section:
                logger.warn('inner_boundary_albedo set, however enable_reflective_inner_boundary option not specified '
                            '- defaulting to reflective inner boundary')
                montecarlo_config_dict['enable_reflective_inner_boundary'] = True

            if 'enable_reflective_inner_boundary' in montecarlo_section:
                montecarlo_config_dict['enable_reflective_inner_boundary'] = montecarlo_section['enable_reflective_inner_boundary']
                if montecarlo_section['enable_reflective_inner_boundary'] == True and 'inner_boundary_albedo' not in montecarlo_section:
                    logger.warn('enabled reflective inner boundary, but "inner_boundary_albedo" not set - defaulting to 0.5')
                    montecarlo_config_dict['inner_boundary_albedo'] = 0.5




        if 'black_body_sampling' in montecarlo_section:
            black_body_sampling_section = montecarlo_section.pop('black_body_sampling')
            sampling_start, sampling_end = parse_spectral_bin(black_body_sampling_section['start'],
                                                              black_body_sampling_section['stop'])
            montecarlo_config_dict['black_body_sampling'] = {}
            montecarlo_config_dict['black_body_sampling']['start'] = sampling_start
            montecarlo_config_dict['black_body_sampling']['end'] = sampling_end
            montecarlo_config_dict['black_body_sampling']['samples'] = np.int64(black_body_sampling_section['num'])
        else:
            logger.warn('No "black_body_sampling" section in config file - using defaults of '
                        '50 - 200000 Angstrom (1e6 samples)')
            montecarlo_config_dict['black_body_sampling'] = {}
            montecarlo_config_dict['black_body_sampling']['start'] = 50 * u.angstrom
            montecarlo_config_dict['black_body_sampling']['end'] = 200000 * u.angstrom
            montecarlo_config_dict['black_body_sampling']['samples'] = np.int64(1e6)

        config_dict['montecarlo'] = montecarlo_config_dict
        ##### End of MonteCarlo section






        ##### spectrum section ######
        spectrum_section = raw_dict.pop('spectrum')
        spectrum_config_dict = {}
        spectrum_frequency = parse_quantity_linspace(spectrum_section).to('Hz', u.spectral())

        if spectrum_frequency[0] > spectrum_frequency[1]:
            spectrum_frequency = spectrum_frequency[::-1]

        spectrum_config_dict['start'] = parse_quantity(spectrum_section['start'])
        spectrum_config_dict['end'] = parse_quantity(spectrum_section['stop'])
        spectrum_config_dict['bins'] = spectrum_section['num']


        spectrum_frequency = np.linspace(spectrum_config_dict['end'].to('Hz', u.spectral()).value,
                                                         spectrum_config_dict['start'].to('Hz', u.spectral()).value,
                                                         num=spectrum_config_dict['bins'] + 1) * u.Hz

        spectrum_config_dict['frequency'] = spectrum_frequency.to('Hz')
        config_dict['spectrum'] = spectrum_config_dict




        return cls(config_dict, atom_data)
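
A hypothetical usage sketch for the classmethod above, assuming it lives on the TARDISConfiguration class named in the docstring and that the YAML file contains the sections parsed here (supernova, model, plasma, montecarlo, spectrum); the file name is illustrative only:

    import yaml

    with open('tardis_example.yml') as fh:  # hypothetical configuration file
        raw_dict = yaml.safe_load(fh)

    # test_parser=True skips reading the atomic data file, as in the parser tests.
    config = TARDISConfiguration.from_config_dict(raw_dict, test_parser=True)
    # The tests above access the result both attribute-style (config.structure.v_inner)
    # and dict-style (config['spectrum']['bins']).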