Example #1
0
def test():
    """Plot a KEPLER presupernova density profile together with its
    ASpline reconstruction for visual comparison.

    Draws the raw zone boundaries/densities (light grey/red), the
    spline knots and knot values (thin black/red), and a densely
    sampled spline curve (green), then zooms to a fixed window.

    Returns
    -------
    ASpline
        The fitted spline object, for further inspection.
    """
    # NOTE(review): the original also called sample_data() here, but its
    # result was immediately overwritten by the kepdump load below, so
    # the dead assignment has been removed.
    data = kepdump.load('/home/alex/kepler/test/u8.1#presn')
    x = data.rn[:-1]  # zone outer radii
    y = data.dn[:-1]  # zone densities
    f = plt.figure()
    ax = f.add_subplot(111)
    # raw zone boundaries (light grey) and per-zone densities (light red)
    set_axvlines(x,
                 axes = ax,
                 color = '#cfcfcf',
                 linewidth = 3.,
                 )
    ax.hlines(y = y[1:],
              xmin = x[:-1],
              xmax = x[1:],
              color = '#ffcfcf',
              linewidth = 3.,
              )
    s = ASpline(x, y, magic = True, smear = True, maxreplace = 999)
    # spline knots (thin black) and knot-interval values (thin red)
    set_axvlines(s.x,
                 axes = ax,
                 color = 'k',
                 linewidth = .5,
                 )
    ax.hlines(y = s.y[1:],
              xmin = s.x[:-1],
              xmax = s.x[1:],
              color = 'r',
              linewidth = .5,
              zorder = 10,
              )

    # zoom window; the second pair overrides the first (kept for quick
    # switching while debugging)
    xr = np.array([1.535, 1.56]) * 1.e14
    yr = np.array([0., 1.]) * 2.e-8

    xr = np.array([1.465, 1.55]) * 1.e14
    yr = np.array([0.75, 1.45]) * 1.e-9

    # xr = np.array([0.9,1.1]) *  38795082629.909073
    # yr = np.array([0,10])

    ax.set_xlim(xr)
    ax.set_ylim(yr)

    # densely sample the spline: n points per knot interval
    n = 100
    xx = np.ndarray(n * (s.x.shape[0] - 1) + 1)
    for i in range(s.x.shape[0] - 1):
        xx[n*i:n*(i+1)] = s.x[i] + s.d[i+1] * np.linspace(0, 1, n, False)
    xx[-1] = s.x[-1]
    yy = s(xx)
    # xx,yy = reduce.reduce(xx,yy, axes=ax)
    ax.plot(xx, yy, color='g', linewidth=0.5)

    # label_zones(ax, s.x, range(1305, min(1325, len(s.x)-1)))
    # NOTE(review): 10015 looks like a typo (cf. the 1325 bound above);
    # kept as-is since min() caps it at len(s.x)-1 anyway.
    label_zones(ax, s.x, range(1005, min(10015, len(s.x)-1)))

    plt.draw()
    return s
Example #2
0
def load_dump(cycle,
              run,
              batch,
              source,
              basename='xrb',
              prefix='',
              verbose=False):
    """Load a single KEPLER dump for the given cycle of a model run.

    The dump lives under MODELS_PATH/<batch>/<run>/ and is loaded
    non-graphically and silently.
    """
    dump_name = get_dump_filename(cycle, run, basename, prefix=prefix)
    dump_path = os.path.join(MODELS_PATH,
                             grid_strings.get_batch_string(batch, source),
                             grid_strings.get_run_string(run, basename),
                             dump_name)
    printv(f'Loading: {dump_path}', verbose=verbose)
    return kepdump.load(dump_path, graphical=False, silent=True)
Example #3
0
def load_dump(cycle,
              run,
              batch,
              source,
              basename='xrb',
              prefix='',
              verbose=False):
    """Load a single KEPLER dump for the given cycle of a model run.

    The model directory is resolved via grid_strings.get_model_path.
    """
    dump_dir = grid_strings.get_model_path(run=run,
                                           batch=batch,
                                           source=source,
                                           basename=basename)
    dump_path = os.path.join(
        dump_dir, get_dump_filename(cycle, run, basename, prefix=prefix))
    printv(f'Loading: {dump_path}', verbose=verbose)
    return kepdump.load(dump_path, graphical=False, silent=True)
Example #4
0
File: ionmap.py  Project: earnric/modules
def test_tdep_dec_abuset():
    """Plot the time-dependent decay of Co-56 in a KEPLER nucleosynthesis dump.

    Loads the z15D#nucleo dump, applies TimedDecay for decay times of
    10**4 ... 10**8.5 seconds (half-decade steps), and plots the
    resulting Co-56 mass fraction against the mass coordinate.
    """
    import bdat
    import kepdump

    np.seterr(all = 'warn')
    k = kepdump.load('/home/alex/kepler/test/z15D#nucleo')
    a = k.abub
    b = bdat.BDat('/home/alex/kepler/local_data/bdat').decaydata
    d = TimedDecay(ions = a, decaydata = b)

    # pyplot suffices here; pylab is discouraged (it pollutes the namespace)
    import matplotlib.pyplot as plt

    f = plt.figure()
    ax = f.add_subplot(111)
    for x in np.arange(4, 9, 0.5):  # log10(time / s) in half-decade steps
        t = 10**x
        s = time2human(t)
        ax.plot(k.zm_sun, d(a, time = t).ion_abu('co56'), label = s)
    ax.set_xlabel('mass coordinate / solar masses')
    ax.set_ylabel('mass fraction')
    leg = ax.legend(loc='best')
    # Legend.draggable() was deprecated in Matplotlib 3.0 and removed in 3.3
    leg.set_draggable(True)
    ax.set_xlim(1.4, 1.6)
Example #5
0
def check_finished(batches,
                   source,
                   efficiency=True,
                   show='all',
                   basename='xrb',
                   extension='z1',
                   **kwargs):
    """Checks which running models are finished

    t_end      =  flt  : end-time of the simulations
    basename   =  str  : prefix for individual model names
    extension  =  str  : suffix of kepler dump
    efficiency = bool  : print time per 1000 steps
    show       =  str  : which models to show, based on their progress,
                    one of (all, finished, not_finished, started, not_started)
    (path      =  str  : path to location of model directories)

    Notes
    -----
    timeused gets reset when a model is resumed,
        resulting in unreliable values in efficiency
    """
    def progress_string(batch, basename, run, progress, elapsed, remaining,
                        eff_str, eff2_str):
        # Returns a single-element list so it can be concatenated
        # directly onto print_strings with `+=`.
        string = [
            f'{batch}    {basename}{run:02}  {progress:.0f}%   ' +
            f'{elapsed:.0f}hrs     ~{remaining:.0f}hrs,    ' +
            f'{eff_str},    {eff2_str}'
        ]
        return string

    def shorthand(string):
        # Expand abbreviated 'show' options to their full names;
        # full names pass through unchanged.
        map_ = {
            'a': 'all',
            'ns': 'not_started',
            'nf': 'not_finished',
            'f': 'finished'
        }
        if string not in map_:
            if string not in map_.values():
                raise ValueError("invalid 'show' parameter")
            return string
        else:
            return map_[string]

    source = grid_strings.source_shorthand(source=source)
    show = shorthand(show)
    batches = expand_batches(batches=batches, source=source)

    print_strings = []
    # indices into print_strings, bucketed by model progress state
    print_idx = {
        'finished': [],
        'not_finished': [],
        'started': [],
        'not_started': []
    }
    for batch in batches:
        n_runs = get_nruns(batch=batch, source=source)
        print_strings += [f'===== Batch {batch} =====']

        for run in range(1, n_runs + 1):
            run_str = grid_strings.get_run_string(run, basename)
            run_path = grid_strings.get_model_path(run,
                                                   batch,
                                                   source,
                                                   basename=basename)
            string_idx = len(print_strings)

            filename = f'{run_str}{extension}'
            filepath = os.path.join(run_path, filename)

            # ===== get t_end from cmd file =====
            cmd_file = f'{run_str}.cmd'
            cmd_filepath = os.path.join(run_path, cmd_file)

            t_end = None
            try:
                with open(cmd_filepath) as f:
                    lines = f.readlines()

                marker = '@time>'
                for line in lines[-10:]:
                    if marker in line:
                        # Take the text after the marker.  The original
                        # used line.strip('@time>'), but strip() strips a
                        # character *set* and could eat digits/exponent
                        # characters of the value itself.
                        t_end = float(line.partition(marker)[2])
                        break

                # if no '@time>' line was found, t_end is still None and
                # the division below raises, marking the run not_started
                kmodel = kepdump.load(filepath)
                progress = kmodel.time / t_end
                timeused = kmodel.timeused[0][-1]  # CPU time elapsed
                ncyc = kmodel.ncyc  # No. of time-steps
                remaining = (timeused / 3600) * (1 - progress) / progress

                if efficiency:
                    eff = (timeused / (ncyc / 1e4)) / 3600  # Time per 1e4 cyc
                    eff2 = timeused / kmodel.time
                    eff_str = f'{eff:.1f} hr/10Kcyc'
                    eff2_str = f'{eff2:.2f} walltime/modeltime'
                else:
                    eff_str = ''
                    eff2_str = ''

                # ===== Tracking model progress =====
                print_idx['started'] += [string_idx]

                if f'{remaining:.0f}' == '0':
                    print_idx['finished'] += [string_idx]
                else:
                    print_idx['not_finished'] += [string_idx]
            except Exception:
                # dump/cmd file missing or unreadable: treat the model as
                # not started (a bare `except:` would also swallow
                # KeyboardInterrupt/SystemExit)
                progress = 0
                timeused = 0
                remaining = 0
                eff_str = ''
                eff2_str = ''

                print_idx['not_started'] += [string_idx]

            progress *= 100
            elapsed = timeused / 3600
            print_strings += progress_string(batch=batch,
                                             basename=basename,
                                             run=run,
                                             progress=progress,
                                             elapsed=elapsed,
                                             remaining=remaining,
                                             eff_str=eff_str,
                                             eff2_str=eff2_str)

    print_idx['all'] = np.arange(len(print_strings))

    print_dashes()
    print('Batch  Model       elapsed  remaining')
    for i, string in enumerate(print_strings):
        if i in print_idx[show]:
            print(string)
def data(dbfilename=os.path.expanduser(
    '~/python/project/znuc2012.S4.star.el.y.stardb.gz')):
    """Collect isotope/remnant-mass data from the star databases.

    Loads the element and isotope databases, saves the mass numbers,
    per-(energy, mixing) remnant masses and isotope abundances plus the
    ion info to ~/python/project/filestoload/, and returns
    ``[masses, lifetimes]`` where lifetimes (in years) are read from the
    corresponding #presn dump files.
    """
    db = stardb.load(dbfilename)  # loads database
    nmass = db.nvalues[0]  # number of initial-mass grid points
    masses = db.values[0][:nmass]  # vector of the initial masses
    isodb = stardb.load(
        os.path.expanduser(
            '~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))

    # mass number A for every ion in the isotope database
    massnumber = np.array([ion.A for ion in isodb.ions])
    np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'),
            massnumber)
    #######################
    # write all energy and mixing values

    energyvalues = np.unique(db.fielddata['energy'])
    mixingvalues = np.unique(db.fielddata['mixing'])
    masterremnant = []  # remnant masses indexed [energy][mixing]
    isodata = []  # abundances indexed [energy][mixing][isotope]
    w = len(isodb.ions)
    for energy in energyvalues:
        remmixingarray = []  # reinitialise the next dimension
        isomixingarray = []
        for mixing in mixingvalues:
            # select all stars computed at this (energy, mixing) pair
            ii = np.logical_and(np.isclose(db.fielddata['energy'], energy),
                                np.isclose(db.fielddata['mixing'], mixing))

            # array of remnant masses for one energy and every mixing value
            remmixingarray.append(db.fielddata[ii]['remnant'])

            jj = np.where(ii)  # invariant over the isotope loop: hoisted
            isofill = []
            for m in range(w):
                a = isodb.ions[m]  # for obtaining the element string
                # finding the indices in db.ions for a particular element
                kk = np.where(isodb.ions == isotope.ion(a))
                # array of abundances of this isotope for the selected stars
                isofill.append(isodb.data[jj, kk][0])

            isomixingarray.append(isofill)

        # master arrays keep every bit of data under its own energy, so
        # isodata[energy][mixing][isotope] gives the data for every star
        masterremnant.append(remmixingarray)
        isodata.append(isomixingarray)

    np.save(os.path.expanduser('~/python/project/filestoload/IsoData'),
            isodata)
    np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'),
            masterremnant)
    np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'),
            isodb.ions)

    # cycle through the masses and grab the lifetime of each star
    time = []
    for mass in masses:
        s = str(mass)  # converts the mass number to a string for file lookup
        if s.endswith('.0'):  # formatting issue, to match the filenames
            s = s[:-2]
        filename = os.path.expanduser(
            '~/python/project/dumps/z{}#presn').format(s)
        d = kepdump.load(filename)  # loads the kepdump data for this star
        time.append(d.time)
    yr = 365.2425 * 86400  # seconds per (mean Gregorian) year
    time = np.array(time) / yr
    dataarray = [masses, time]

    return dataarray
    def __init__(self,
                 dbfilename=os.path.expanduser(
                     '~/python/project/znuc2012.S4.star.el.y.stardb.gz'),
                 efunc=sn_energy_default,
                 reload=False):
        """
        init will check if the module has been run before and if so, will
        load all required files and data.  If not, it will have to
        load the database and do all required interpolation and
        function solving which will take ~30 seconds.  Applying
        reload=True will force the program to reload; do so if the
        energy or mixing function was changed by the user.
        """
        save_path = os.path.expanduser('~/python/project/filestoload/')
        output_path = os.path.expanduser('~/python/project/outputfiles/')
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        os.makedirs(os.path.dirname(output_path), exist_ok=True)

        energy_file = os.path.expanduser(
            '~/python/project/filestoload/Energyvalues.npy')
        mixing_file = os.path.expanduser(
            '~/python/project/filestoload/Mixingvalues.npy')
        # Use the cached energy/mixing grids only when BOTH caches exist;
        # otherwise recompute and save both.  (The original tested the
        # two files independently, which left `energyvalues` unbound
        # when only the mixing cache was present.)
        if (not reload) and os.path.isfile(energy_file) \
                and os.path.isfile(mixing_file):
            energyvalues = np.load(energy_file)
            mixingvalues = np.load(mixing_file)
        else:
            d = stardb.load(dbfilename)
            energyvalues = np.unique(d.fielddata['energy'])
            mixingvalues = np.unique(d.fielddata['mixing'])

            np.save(energy_file, energyvalues)
            np.save(mixing_file, mixingvalues)

        self.i = 0  # incremented when the data had to be regenerated

        if (not reload) and os.path.isfile(
                os.path.expanduser(
                    '~/python/project/filestoload/IsoData.npy')):
            # All derived data exists on disk -- just load the caches.
            self.isodata = np.load(
                os.path.expanduser('~/python/project/filestoload/IsoData.npy'))
            self.remnantmasses = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/SpecificRem.npy'))
            self.isotopes = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/SpecificIso.npy'))
            self.massnumber = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/Massnumber.npy'))
            self.data = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/ProjectData.npy'))
            self.isoinfo = np.load(
                os.path.expanduser('~/python/project/filestoload/Ioninfo.npy'))
            self.time = self.data[1]
            self.masses = self.data[0]
            self.explodemass = self.data[2]
            self.massfrac = self.data[3]
            self.egy = np.load(
                os.path.expanduser('~/python/project/filestoload/egy.npy'))
        else:
            self.data = np.array(makedata.data(dbfilename))
            self.i += 1  # need to run romberg and save data as data doesn't exist

            self.isodata = np.load(
                os.path.expanduser('~/python/project/filestoload/IsoData.npy'))
            self.isoinfo = np.load(
                os.path.expanduser('~/python/project/filestoload/Ioninfo.npy'))

            self.massnumber = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/Massnumber.npy'))
            self.time = self.data[1]
            self.masses = self.data[0]
            # remnant-mass grid (the original loaded this file twice)
            rem = np.load(
                os.path.expanduser(
                    '~/python/project/filestoload/RemnantMasses.npy'))

            self.remnantmasses = []
            self.isotopes = []
            self.egy = []

            for starcount in range(len(
                    self.masses)):  # loop through each star individually

                s = str(self.masses[starcount])
                if s.endswith(
                        '.0'):  # formatting issue, to match the filenames
                    s = s[:-2]
                filename = os.path.expanduser(
                    '~/python/project/dumps/z{}#presn').format(s)
                # grabs filename corresponding to this mass
                d = kepdump.load(filename)
                energy = efunc(d)

                self.egy.append(energy)
                mixing = mixfunc(energy)
                # interpolates to grab the correct remnant mass
                remnantmass = remnantobtain.remnant(energyvalues, mixingvalues,
                                                    energy, mixing, rem,
                                                    starcount)

                isoarray = []
                for isotopecount in range(self.isodata.shape[2]):
                    # interpolates to find the correct isotope ejecta
                    k = elementobtain.element(energyvalues, mixingvalues,
                                              energy, mixing, self.isodata,
                                              starcount, isotopecount)
                    isoarray.append(k)
                if energy == 0.0:  # this includes the star but no explosion/ejecta
                    remnantmass = self.masses[starcount]
                    isoarray = np.zeros(np.array(isoarray).shape)

                self.isotopes.append(isoarray)
                self.remnantmasses.append(remnantmass)

            # re-index as [isotope][star] rather than [star][isotope]
            reshape = np.array(self.isotopes)
            self.isotopes = np.swapaxes(reshape, 0, 1)
            self.remnantmasses = np.array(self.remnantmasses)

            self.explodemass = self.masses - self.remnantmasses
            self.massfrac = self.explodemass / self.masses
            np.save(
                os.path.expanduser('~/python/project/filestoload/SpecificRem'),
                self.remnantmasses)
            np.save(
                os.path.expanduser('~/python/project/filestoload/SpecificIso'),
                self.isotopes)
            np.save(os.path.expanduser('~/python/project/filestoload/egy'),
                    self.egy)
            np.save(os.path.expanduser('~/python/project/filestoload/oldimf'),
                    0)
        print(
            'Please run the enterIMF(IMF=...) function. Salpeter is the default IMF'
        )
Example #8
0
def test():
    """Plot a KEPLER presupernova density profile together with its
    ASpline reconstruction for visual comparison.

    Draws the raw zone boundaries/densities (light grey/red), the
    spline knots and knot values (thin black/red), and a densely
    sampled spline curve (green), then zooms to a fixed window.

    Returns
    -------
    ASpline
        The fitted spline object, for further inspection.
    """
    # NOTE(review): a preceding `data = sample_data()` call was dead --
    # its result was immediately overwritten below -- and was removed.
    data = kepdump.load('/home/alex/kepler/test/u8.1#presn')
    x = data.rn[:-1]  # zone outer radii
    y = data.dn[:-1]  # zone densities
    f = plt.figure()
    ax = f.add_subplot(111)
    # raw zone boundaries (light grey) and per-zone densities (light red)
    set_axvlines(
        x,
        axes=ax,
        color='#cfcfcf',
        linewidth=3.,
    )
    ax.hlines(
        y=y[1:],
        xmin=x[:-1],
        xmax=x[1:],
        color='#ffcfcf',
        linewidth=3.,
    )
    s = ASpline(x, y, magic=True, smear=True, maxreplace=999)
    # spline knots (thin black) and knot-interval values (thin red)
    set_axvlines(
        s.x,
        axes=ax,
        color='k',
        linewidth=.5,
    )
    ax.hlines(
        y=s.y[1:],
        xmin=s.x[:-1],
        xmax=s.x[1:],
        color='r',
        linewidth=.5,
        zorder=10,
    )

    # zoom window; the second pair overrides the first (kept for quick
    # switching while debugging)
    xr = np.array([1.535, 1.56]) * 1.e14
    yr = np.array([0., 1.]) * 2.e-8

    xr = np.array([1.465, 1.55]) * 1.e14
    yr = np.array([0.75, 1.45]) * 1.e-9

    # xr = np.array([0.9,1.1]) *  38795082629.909073
    # yr = np.array([0,10])

    ax.set_xlim(xr)
    ax.set_ylim(yr)

    # densely sample the spline: n points per knot interval
    n = 100
    xx = np.ndarray(n * (s.x.shape[0] - 1) + 1)
    for i in range(s.x.shape[0] - 1):
        xx[n * i:n *
           (i + 1)] = s.x[i] + s.d[i + 1] * np.linspace(0, 1, n, False)
    xx[-1] = s.x[-1]
    yy = s(xx)
    # xx,yy = reduce.reduce(xx,yy, axes=ax)
    ax.plot(xx, yy, color='g', linewidth=0.5)

    # label_zones(ax, s.x, range(1305, min(1325, len(s.x)-1)))
    # NOTE(review): 10015 looks like a typo (cf. the 1325 bound above);
    # kept as-is since min() caps it at len(s.x) - 1 anyway.
    label_zones(ax, s.x, range(1005, min(10015, len(s.x) - 1)))

    plt.draw()
    return s