Example #1
def cluster_initial_models(Mclus=10**4,
                           write_csv=False,
                           output_folder='output_cluster',
                           exec_name='zams'):
    '''
    Generates the initial models (cluster at t=0). Returns only the overall
    parameters (profiles are not needed in this part).
    D. Vallés

    Parameters:
    Mclus: mass of the cluster one wants to generate
    write_csv: (default: False) whether one wants to save the values as a csv.
    output_folder: where to save the outputs
    exec_name: name of the executable file for the simulation
    '''
    ## We pick the masses in ZAMS
    #we generate a large pool of masses sampled from the Salpeter IMF via
    #Metropolis sampling (a sketch of these helpers follows this function)
    print(' Generating the IMF...')
    random_masses = np.array(
        list(metropolis(salpeter_xi, 10**6, uniform_proposal)))
    #and we draw masses one by one until the total mass reaches Mclus
    print('Done! \n Filling our ZAMS...')
    masses = [np.random.choice(random_masses)]
    while sum(masses) < Mclus:
        masses.append(np.random.choice(random_masses))
    #the last draw overshoots Mclus, so we keep it only with probability
    #p = (mass still missing before adding it) / (mass of the last star);
    #this keeps the expected total cluster mass equal to Mclus
    p = (Mclus - (sum(masses) - masses[-1])) / masses[-1]
    if np.random.rand() > p:
        masses.pop()

    print('Done! \n Generating the model guesses...')
    models = several_stars_guess(masses)
    failed = []
    parameters = []

    os.system('mkdir ' + output_folder)
    os.system('cp ' + exec_name + ' ./' + output_folder)
    os.chdir(output_folder)

    print('Done! \n Starting the simulations...')
    for idx, model in enumerate(models):
        print('ZAMS: Starting star no. {:04d} of {:04d}'.format(
            idx + 1, len(models)))
        gm.write_pars_file(filename='custom_pars.pars',
                           mass=model[0],
                           x=0.7381,
                           y=0.2485,
                           p=model[1],
                           T=model[2],
                           R=model[3],
                           L=model[4],
                           output_filename='custom_output.dat')
        gm.launch_model(parameters_filename='custom_pars.pars',
                        exec_name=exec_name)
        if not gm.has_succeeded(output_filename='custom_output.dat'):
            failed.append(idx)
            print('Failed!! :C')
        else:
            parameters.append(rd.read_params('custom_output.dat'))
            totaleps = integrate_nuclear_production('custom_output.dat',
                                                    model[0])
            Hburningrate = how_much_hydrogen_burnt(totaleps)
            parameters[-1].append(Hburningrate)
            EstimatedlifetimeZAMS = estimate_lifetime(0.7381, model[0],
                                                      Hburningrate)
            parameters[-1].append(EstimatedlifetimeZAMS)

    print(np.array(parameters).shape)
    print('The following have failed: {}.'.format(failed))
    missingmass = sum([masses[i] for i in failed])
    if missingmass > Mclus / 10**3:
        print('We re-run for the missing mass:')
        parameters.extend(cluster_initial_models(Mclus=missingmass))
        os.system('rm -r ./' + output_folder)
        print(np.array(parameters).shape)

    os.system('rm ./' + exec_name)
    os.system('rm ./' + 'custom_pars.pars')
    os.system('rm ./' + 'custom_output.dat')

    if write_csv:
        values = np.array(parameters)
        column_names = [
            'm', 'x', 'y', 'p', 'Tc', 'R', 'Lergs', 'Teff', 'Lsununits',
            'Hburningrate', 'EstimatedlifetimeZAMS'
        ]
        pd.DataFrame(values).to_csv('initial.csv',
                                    sep=',',
                                    index=False,
                                    header=column_names)
    os.chdir('..')

    return parameters
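
The IMF sampling above relies on the helpers salpeter_xi, uniform_proposal and metropolis, which are defined elsewhere in the module. A minimal sketch of what they could look like, assuming a Salpeter slope of 2.35 and an illustrative mass range of 0.1 to 100 solar masses (the slope, range and step width are assumptions, not taken from this listing):

import numpy as np

# hypothetical stand-ins for the module's sampling helpers
def salpeter_xi(m, alpha=2.35, m_min=0.1, m_max=100.0):
    """Unnormalized Salpeter IMF, xi(m) ~ m**(-alpha), zero outside [m_min, m_max]."""
    return m**(-alpha) if m_min <= m <= m_max else 0.0

def uniform_proposal(m, width=1.0):
    """Symmetric uniform step around the current mass."""
    return m + np.random.uniform(-width, width)

def metropolis(target, n_samples, proposal, m0=1.0):
    """Yield n_samples masses from a Metropolis random walk over target."""
    m = m0
    for _ in range(n_samples):
        m_new = proposal(m)
        # accept the move with probability min(1, target(m_new) / target(m))
        if np.random.rand() < target(m_new) / target(m):
            m = m_new
        yield m

With these in place, list(metropolis(salpeter_xi, 10**6, uniform_proposal)) produces the mass pool that cluster_initial_models draws from.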
Example #2
def time_evolution(parameters,
                   timestep,
                   itnum,
                   write_csv=False,
                   output_folder='output_cluster',
                   exec_name='zams'):
    '''
    Work in progress!!!
    Returns the time-evolved models, considering the change in composition
    due to nuclear burning.
    D. Vallés
    '''
    #we update the compositions (a sketch of update_composition follows this function)
    print('Starting it. {}. Updating compositions...'.format(itnum))
    for idx in range(len(parameters)):
        M, x, y = parameters[idx][0], parameters[idx][1], parameters[idx][2]
        Hburningrate = parameters[idx][9]
        parameters[idx][1], parameters[idx][2] = update_composition(
            x, y, M, Hburningrate, timestep)

    failed = []
    newparameters = []

    os.system('mkdir ' + output_folder)
    os.system('cp ' + exec_name + ' ./' + output_folder)
    os.chdir(output_folder)

    print('Done! \n Starting the simulations...')
    for idx, model in enumerate(parameters):
        print('It. num. {}: Starting star no. {:04d} of {:04d}'.format(
            itnum, idx + 1, len(parameters)))
        gm.write_pars_file(filename='custom_pars.pars',
                           mass=model[0],
                           x=model[1],
                           y=model[2],
                           p=model[3],
                           T=model[4],
                           R=model[5],
                           L=model[8],
                           output_filename='custom_output.dat')
        gm.launch_model(parameters_filename='custom_pars.pars',
                        exec_name=exec_name)
        if not gm.has_succeeded(output_filename='custom_output.dat'):
            failed.append(idx)
            print('Failed!! :C')
        else:
            newparameters.append(rd.read_params('custom_output.dat'))
            totaleps = integrate_nuclear_production('custom_output.dat',
                                                    newparameters[-1][0])
            Hburningrate = how_much_hydrogen_burnt(totaleps)
            newparameters[-1].append(Hburningrate)
            EstimatedlifetimeZAMS = estimate_lifetime(newparameters[-1][1],
                                                      newparameters[-1][0],
                                                      Hburningrate)
            newparameters[-1].append(EstimatedlifetimeZAMS)

    print(np.array(newparameters).shape)
    print('The following have failed: {}.'.format(failed))

    os.system('rm ./' + exec_name)
    os.system('rm ./' + 'custom_pars.pars')
    os.system('rm ./' + 'custom_output.dat')

    if write_csv:
        values = np.array(newparameters)
        column_names = [
            'm', 'x', 'y', 'p', 'Tc', 'R', 'Lergs', 'Teff', 'Lsununits',
            'Hburningrate', 'EstimatedlifetimeZAMS'
        ]
        pd.DataFrame(values).to_csv('iteration_{}.csv'.format(itnum),
                                    sep=',',
                                    index=False,
                                    header=column_names)
    os.chdir('..')

    return newparameters
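

# Note: time_evolution calls update_composition, which is not shown in this
# listing. A minimal sketch is given below, assuming Hburningrate is the
# hydrogen mass burnt per unit time (same mass and time units as M and the
# timestep) and that all burnt hydrogen is converted into helium.
def update_composition(x, y, M, Hburningrate, timestep):
    """Advance the H and He mass fractions after one timestep of burning (sketch)."""
    # fraction of the total stellar mass converted from H to He in this step
    dX = Hburningrate * timestep / M
    x_new = max(x - dX, 0.0)
    y_new = y + (x - x_new)  # helium gains exactly what hydrogen loses
    return x_new, y_new
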
def full_sim(output_dir, M_list, x, y):
    '''
    Builds a stellar evolution model for each mass guess,
    extracts the profiles and the parameters,
    and saves all outputs in output_dir (*.csv, *.dat).

    Arguments:
    output_dir: name of the directory that will be created in ./ ; it should
        end with a trailing slash, since all paths are built by concatenation
    M_list: list of mass guesses for the simulation
    x, y: hydrogen and helium mass fractions

    M. Batzer
    '''
    #interpolation for initial values
    lista_guess = cluster.several_stars_guess(M_list)

    #create or check existence of output_dir
    try:
        os.mkdir(output_dir)
        print("Directory ", output_dir, " created ")
    except FileExistsError:
        print("Directory ", output_dir, " already exists")

    #copy the zams.f source into the directory and compile it with gfortran
    sh.copyfile('./' + 'zams.f', output_dir + 'zams.f')
    os.system('gfortran -o ' + output_dir + 'zams ' + output_dir + 'zams.f')

    #write *.pars files in ./output_dir
    for i, guess in enumerate(lista_guess):
        gm.write_pars_file('pars_{:04}.pars'.format(i), output_dir, guess[0],
                           x, y, guess[1], guess[2], guess[3], guess[4],
                           'output_{:04}.dat'.format(i))

    #launch model with *.pars files and save in ./
    for pars in os.listdir(output_dir):
        if pars.endswith('.pars'):
            #print(dirName + pars)
            gm.launch_model(output_dir + pars, output_dir + 'zams')

    #move *.dat files into output_dir
    for output in os.listdir('./'):
        if output.endswith('.dat'):
            os.replace(output, output_dir + output)

    #check the success of each model
    for file in os.listdir(output_dir):
        if file.endswith('.dat'):
            if not gm.has_succeeded(file, output_dir):
                print(file, 'with mass', round(M_list[int(file[7:11])], 3),
                      gm.has_succeeded(file, output_dir))

    #read profiles from *.dat file
    for file in os.listdir(output_dir):
        if file.endswith('.dat'):
            if gm.has_succeeded(file, output_dir):
                rd.read_output(file, output_dir)

    #create csv of profile
    for file in os.listdir(output_dir):
        if file.endswith('.dat'):
            if gm.has_succeeded(file, output_dir):
                rd.write_csv(file, output_dir)

    #read the parameters from *.dat file
    a_lista = []
    for file in os.listdir(output_dir):
        if file.endswith('.dat'):
            if gm.has_succeeded(file, output_dir):
                a_n = rd.read_params(file, output_dir)
                a_lista.append(a_n)

    para_all = np.array(a_lista)
    para_all = para_all[para_all[:, 0].argsort()]

    #create csv of the parameters
    pd.DataFrame(para_all).to_csv(
        output_dir + 'para_all.csv',
        sep=",",
        index=False,
        header=['m_sun', 'x', 'y', 'Pc', 'Tc', 'R', 'L', 'Teff', 'L/L_sun'])
    return para_all
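
A possible way to drive full_sim, assuming a short list of illustrative mass guesses and the same near-solar composition (x=0.7381, y=0.2485) used in cluster_initial_models; the trailing slash on output_dir matters because the paths above are built by plain string concatenation:

if __name__ == '__main__':
    # illustrative mass guesses in solar masses (hypothetical values)
    mass_guesses = [0.8, 1.0, 2.0, 5.0, 10.0]
    # 'output_full_sim/' is a hypothetical directory name; note the trailing slash
    para_all = full_sim('output_full_sim/', mass_guesses, 0.7381, 0.2485)
    print(para_all.shape)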