# Example #1 (score: 0)
def test_working_dir():
    """A project created with an empty name gets a generated name, lives in a
    subdirectory of the current working directory, and is fully removed on
    cleanup; deactivating a project restores the original working directory.
    """
    cwd = Path(os.getcwd()).abspath()

    p = project.Project(name='')
    # `except: raise` was redundant — `finally` runs whether or not the body
    # raises, so plain try/finally gives the same cleanup guarantee.
    try:
        assert p.name
        assert str(cwd) != str(p.dirname)
        assert (cwd / p.name).exists()
        assert (cwd / p.name).isdir()
        for d in ('input', 'output'):
            assert (p.dirname / d).exists()
            assert (p.dirname / d).isdir()
        assert (p.dirname / 'which_output_files.csv').exists()

        p.deactivate()
        where_i_am = str(Path(os.getcwd()).abspath())
        assert str(cwd) == where_i_am, str(cwd) + ' / ' + where_i_am

        p.activate()
    finally:
        p.remove(force=True)
    assert (cwd / p.name).exists() is False

    # Re-opening an existing (deactivated) project by name must resolve to
    # the same directory.
    p = project.Project()
    p.deactivate()
    pp = project.Project(name=p.name)
    try:
        assert str(pp.dirname) == str(p.dirname)
    finally:
        pp.remove(force=True)
def test_shift_in_light():
    """The difference between the light intercepted by one plant on one day
    and the light intercepted by that same plant the next day
    (PAR_per_axes.csv output file; value for Sum_PAR) is always less
    than 1000% (or x10).
    """
    p = project.Project(name='shift_in_light')
    # `except: raise` removed — redundant with the `finally` cleanup.
    try:
        # run the reference simulation
        lsys, lstring = p.run_parameters('sim_scheme_test.csv')
        PAR_per_axes_dico = lsys.context().locals()['PAR_per_axes_dico']
        df = pandas.DataFrame(PAR_per_axes_dico)
        # normalise Sum_PAR by the relative incident PAR so that day-to-day
        # changes of the incoming light are not counted as "shifts"
        df['relative_Inc_PAR'] = df['Inc_PAR'] / df['Inc_PAR'].mean()
        df['relative_Sum_PAR'] = df['Sum_PAR'] / df['relative_Inc_PAR']

        def _max_variation(x):
            # % variation between consecutive rows; [1:] drops the NaN
            # produced by the first diff()
            variation = (x.relative_Sum_PAR.diff().abs() /
                         x.relative_Sum_PAR)[1:] * 100
            return variation.max()

        # test max variation per plant
        relative_variation = df.groupby([
            'Num_plante', 'Elapsed_time'
        ]).agg('sum').reset_index().groupby('Num_plante').apply(_max_variation)

        assert all(relative_variation < 50)
    finally:
        p.remove(force=True)
def projecion_screen_tuning():
    """Helper (not collected by pytest): run the reference simulation and
    return the caribu recorder data as a DataFrame.

    NOTE(review): the function name has a typo ('projecion'); kept unchanged
    so any existing callers keep working.
    """
    p = project.Project(name='projection_screen_tuning')
    try:
        # run the reference simulation
        lsys, lstring = p.run_parameters('sim_scheme_test.csv')
        caribu_recorder = lsys.context().locals()['caribu_recorder']
        df = pandas.DataFrame(caribu_recorder.records_data())
        # returning df (previously discarded) makes the helper usable for
        # interactive inspection; callers ignoring the return are unaffected
        return df
    finally:
        # previously the project directory leaked whenever the run raised
        p.remove(force=True)
# Example #4 (score: 0)
def test_unedfined_Ln_final():
    """A simulation scheme that leaves Ln_final undefined still runs to
    completion (function-name typo kept: it is the public test id).
    """
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        param = p.csv_parameters('sim_scheme_test_undefined_ln_final.csv')[0]
        lsys, lstring = p.run(**param)
        assert lstring
    finally:
        p.remove(force=True)
# Example #5 (score: 0)
def test_bug_Ln_final():
    """Regression test: a run with an explicit Ln_final_Maxwell override and
    a long duration (nbj=120) completes and produces an lstring.
    """
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        param = p.csv_parameters('sim_scheme_test.csv')[0]
        param.update(dict(Ln_final_Maxwell=20.3, nbj=120))
        lsys, lstring = p.run(**param)
        assert lstring
    finally:
        p.remove(force=True)
# Example #6 (score: 0)
def test_direct_run():
    """p.run() accepts simulation parameters directly as keywords
    (no csv scheme needed) and produces a non-trivial lstring and scene.
    """
    p = project.Project()
    try:
        lsys, lstring = p.run(nb_plt_utiles=1,
                              dist_border_x=0,
                              dist_border_y=0,
                              nbj=30,
                              beginning_CARIBU=290)
        s = lsys.sceneInterpretation(lstring)
    finally:
        # previously the project directory leaked whenever run() raised
        p.remove(force=True)
    assert len(lstring) > 10
    assert len(s) > 0
# Example #7 (score: 0)
def test_modified_parameters():
    """Parameters loaded from a csv scheme can be overridden before run(),
    and the override is recorded in the project's combi_params table.
    """
    reset_call_dir()
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        param = p.csv_parameters('sim_scheme_fast_test.csv')[0]
        param.update(dict(nbj=40))
        lsys, lstring = p.run(**param)
        assert lstring
        assert p.combi_params.nbj[0] == 40
    finally:
        p.remove(force=True)
def shift_in_light_bug():
    """Inspect the shift in light bug.

    Returns the per-(plant, tiller) ratio tiller_surface / Organ_surface
    (previously this expression was computed and silently discarded).
    NOTE(review): the project is deliberately not removed, so outputs remain
    on disk for inspection.
    """
    p = project.Project(name='shift_in_light')
    params = p.csv_parameters('sim_scheme_test.csv')[0]
    params.update(dict(nbj=225))
    lsys, lstring = p.run(**params)
    # result unused here; call kept in case get_res_sky has side effects —
    # TODO confirm
    res_sky = get_res_sky(lsys, lstring)
    dd = pandas.DataFrame(lsys.context().locals()['PAR_per_organ'])
    dfag = dd.groupby(['Num_plante', 'Num_talle']).agg({
        'Organ_surface': 'sum',
        'tiller_surface': 'mean'
    })
    return dfag.tiller_surface / dfag.Organ_surface
# Example #9 (score: 0)
def test_which_outputs():
    """The which_outputs mapping exposes all output flags; setting one flag
    to 0 disables it without dropping the other entries.
    """
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        output = p.which_outputs
        assert output
        assert output['Apex']
        p.which_outputs = {'Apex': 0}
        new_output = p.which_outputs
        assert not new_output['Apex']
        assert len(new_output) == len(output)  # all outputs flag are required
    finally:
        p.remove(force=True)
# Example #10 (score: 0)
def test_which_parameters():
    """run_parameters(..., which=...) selects which rows of the scheme to
    run: a single index runs one combination, a list runs several.
    """
    reset_call_dir()
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        lsys, lstring = p.run_parameters('sim_scheme_fast_test2.csv', which=0)
        assert lstring
        assert len(p.combi_params) == 1
        lsys, lstring = p.run_parameters('sim_scheme_fast_test2.csv',
                                         which=[1, 2])
        assert lstring
        assert len(p.combi_params) == 3
    finally:
        p.remove(force=True)
# Example #11 (score: 0)
def test_same_result(debug=False):
    """Verify that the same wheat field gives the same results for the same
    chosen parameters.

    With debug=True the project is kept on disk and returned instead of
    being removed.
    """
    p = project.Project(name='same')  # Create the simulation directory
    try:
        directory = project.walter_data()
        params = p.csv_parameters(str(directory/'sim_scheme_ref.csv'))[0]  # recovery of parameters
        p.run(**params)
        _compare_list_of_files(p)
        _compare_file_content(p)
    finally:
        # Bug fix: the old code did `return p` inside `finally`, which
        # silently swallowed any in-flight exception when debug=True.
        if not debug:
            p.remove(force=True)
    if debug:
        return p
# Example #12 (score: 0)
def change_reference():
    """Regenerate the reference outputs: run the reference simulation and
    move every result file into the 'ref_output' reference directory.

    NOTE(review): destructive — any existing reference directory is deleted
    before being rebuilt.
    """

    p = project.Project(name='same') # Create the simulation directory
    directory = project.walter_data()
    params = p.csv_parameters(str(directory/'sim_scheme_ref.csv'))[0] # recovery of parameters
    p.run(**params)
    reference_directory = get_data_dir() + '/ref_output'  # Reference folder
    if os.path.isdir(reference_directory):
        shutil.rmtree(reference_directory)
    result_directory = str(p.output_path()) + '/'
    list_of_result = os.listdir(result_directory)
    # NOTE(review): the directory is (re)created under walter_data(), but
    # files are moved into get_data_dir() + '/ref_output' below — this only
    # works if both resolve to the same location; confirm against those
    # helpers before relying on it.
    os.mkdir(Path(project.walter_data()/'ref_output'))
    for i in list_of_result:
        shutil.move(result_directory+i, reference_directory+'/'+i)
        print(result_directory+i, "has being moved into reference directory ")

    p.remove(force=True)
# Example #13 (score: 0)
def test_run_parameters():
    """Running two csv schemes in sequence produces valid scenes and
    accumulates all parameter combinations in combi_params.
    """
    reset_call_dir()
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        lsys, lstring = p.run_parameters('sim_scheme_fast_test.csv')
        s = lsys.sceneInterpretation(lstring)
        assert len(lstring) > 10
        assert len(s) > 0
        lsys, lstring = p.run_parameters('sim_scheme_fast_test2.csv')
        s = lsys.sceneInterpretation(lstring)
        assert len(lstring) > 10
        assert len(s) > 0
        assert len(p.combi_params) == 3
    finally:
        p.remove(force=True)
def test_infinite():
    """With infinity_CARIBU=1 the canopy is replicated: the computed scene
    pattern must span more than one unit in x.
    """
    p = project.Project(name='infinite_canopy')
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        params = p.csv_parameters('sim_scheme_test.csv')[0]
        params.update(
            dict(nb_plt_utiles=1,
                 dist_border_x=0,
                 dist_border_y=0,
                 nbj=53,
                 infinity_CARIBU=1,
                 beginning_CARIBU=290))
        lsys, lstring = p.run(**params)
        crop_scheme = lsys.context().locals()['crop_scheme']
        pattern = scene_pattern(crop_scheme)
        assert pattern[0] > 1
    finally:
        p.remove(force=True)
def test_check_light_balance():
    """Test if total PAR intercepted is above or below incident PAR:
    the interception balance must never exceed 1.
    """
    p = project.Project(name='light_balance')
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        params = p.csv_parameters('sim_scheme_test.csv')[0]
        params.update(dict(write_debug_PAR=True, infinity_CARIBU=1))
        lsys, lstring = p.run(**params)
        crop_scheme = lsys.context().locals()['crop_scheme']
        # do we really need debug par dico df ? (simulation time is extremly long !)
        df = pandas.DataFrame(lsys.context().locals()['Debug_PAR_dico_df'])
        control = df.groupby('Elapsed_time').agg({
            'Organ_PAR': 'sum',
            'Inc_PAR': 'mean'
        })
        balance = control.Organ_PAR / control.Inc_PAR / crop_scheme[
            'surface_sol']
        assert all(balance <= 1)
    finally:
        p.remove(force=True)
# Example #16 (score: 0)
def test_combi_params():
    """combi_params records one row per distinct parameter combination:
    dry runs append new rows, repeated identical runs do not.
    """
    p = project.Project()
    # redundant `except: raise` removed; `finally` alone guarantees cleanup
    try:
        assert len(p.combi_params) == 0

        p.run(dry_run=True)
        combi = p.combi_params
        assert len(combi) == 1
        assert combi.ID[0] == 'walter_defaults'

        p.run(nbj=30, dry_run=True)
        combi = p.combi_params
        assert len(combi) == 2
        assert combi.ID[0] == 'walter_defaults'
        assert combi.nbj[1] == 30

        # repeat run: same id
        p.run(nbj=30, dry_run=True)
        combi = p.combi_params
        assert len(combi) == 2
        assert combi.ID[0] == 'walter_defaults'
        assert combi.nbj[1] == 30

        path = 'sim_scheme_test.csv'
        p.run_parameters(path, dry_run=True)
        combi = p.combi_params
        assert len(combi) == 3
        assert len(combi.columns) == 27

        # repeat
        p.run_parameters(path, dry_run=True)
        combi = p.combi_params
        assert len(combi) == 3
        assert len(combi.columns) == 27
    finally:
        p.remove(force=True)
def test_zero_light():
    """All tillers receive light (value > 0 for Sum_PAR), both in the
    in-memory results and in the PAR_per_axes.csv output file.
    """
    p = project.Project(name='zero_light')  # Create Folder
    try:
        params = p.csv_parameters('sim_scheme_test.csv')[
            0]  # Recover list of parameters
        params.update(dict(nb_plt_temp=50, nb_rang=10,
                           nbj=156))  # Add new parameters
        lsys, lstring = p.run(**params)
        PAR_per_axes_dico = lsys.context().locals()['PAR_per_axes_dico']
        df = pandas.DataFrame(PAR_per_axes_dico)
        PAR = df.groupby('Num_plante').agg('sum')['Sum_PAR'].values
        assert all(PAR > 0)
        outdir = p.output_path()
        dfout = pandas.read_csv(outdir / 'PAR_per_axes.csv', sep='\t')
        # Bug fix: the written file must be checked too — the old code
        # recomputed this from the in-memory `df` instead of `dfout`, so the
        # csv content was never actually verified.
        PARout = dfout.groupby('Num_plante').agg('sum')['Sum_PAR'].values
        assert all(PARout > 0)
    finally:
        p.remove(force=True)
# Example #18 (score: 0)
def main():
    """Command-line entry point for walter.

    Parses the CLI arguments, opens (or generates) the project directory,
    then either runs a single simulation or fans the rows of a csv
    simulation scheme out to one `walter -i <scheme>` subprocess per row,
    with a crude cap on the number of simultaneous processes and automatic
    re-launch (with a new random seed) of simulations that produced an
    error_caribu.txt file.
    """

    parser = walter_parser()
    args = parser.parse_args()

    if args.p == '.':  # check '.' is walter-like (in case user has  forgotten -p)
        if not project.check_cwd():
            # NOTE(review): raw_input is Python 2 only — under Python 3 this
            # raises NameError; confirm the target interpreter version.
            answer = raw_input(
                "Current directory doesn't look like a walter project dir, Continue ? [Y/N]"
            )
            if answer != 'Y':
                return

    prj = project.Project(args.p)

    # TODO: add a flag in the project to know if the project has been generated, modified or not.
    if prj.dirname.exists():
        print('Use Project %s located at %s' % (prj.name, prj.dirname))
    else:
        print('Project %s has been generated at %s' % (prj.name, prj.dirname))

    if args.i:  # -i has been set
        sim_scheme = args.i
        if sim_scheme == 'walter_default':  # walter command called with dry -i args
            prj.run(dry_run=args.dry_run)
        else:
            param_list = prj.csv_parameters(sim_scheme)
            if len(param_list) == 1:
                # single combination: run it in-process
                prj.run(dry_run=args.dry_run, **(param_list[0]))
            else:
                print('Multiple processes')
                print('generate ids')
                # dry run registers every combination id in combi_params
                prj.run_parameters(sim_scheme, dry_run=True)
                print('run simulations')
                # one-row csv schemes for the subprocesses go in <project>/tmp
                tmp = prj.dirname / 'tmp'
                if not tmp.exists():
                    tmp.mkdir()
                pids = []           # every process ever launched
                procs = {}          # scheme csv path -> its running process
                active_procs = []   # processes not yet known to have finished
                for i, pdict in enumerate(param_list):
                    ############################################# temporary fix #############################################################################################
                    # busy-wait until fewer than 3 subprocesses are running
                    while len(
                            active_procs
                    ) > 2:  # As long as there are 3 active_procs, test if one ends : temporary fix to avoid running too many processes at the same time
                        # poll() is None while a process is still running
                        active_procs = [
                            proc for proc in active_procs
                            if proc.poll() == None
                        ]
                        #time.sleep(300) # To avoid testing for finished processes too often, wait 5 minutes between loops
                    # write this combination as a one-row csv scheme and
                    # launch a child `walter -i` on it
                    df = pd.DataFrame.from_dict(data=[pdict], orient='columns')
                    scheme_name = str(tmp / 'sim_scheme_%d.csv' % (i + 1))
                    df.to_csv(path_or_buf=scheme_name, sep='\t', index=False)
                    prj.activate()
                    pid = Popen(["walter", "-i", scheme_name])
                    pids.append(pid)
                    procs[scheme_name] = pid
                    active_procs.append(pid)
                ############################################# temporary fix #############################################################################################
                # Test caribuRunError re-launching : temporary fix until CaribuRunErrors are solved
                while len(procs) > 0:  # While there are processes to test
                    # NOTE(review): popping from `procs` while iterating
                    # procs.keys() works on Python 2 (keys() is a list) but
                    # raises RuntimeError on Python 3 — confirm interpreter.
                    for scheme in procs.keys(
                    ):  #Not using iteritems because you cannot change the size of a dictionary while iterating on it
                        if procs[scheme].poll(
                        ) != None:  # If the proces is finished
                            procs.pop(scheme)  # Remove this proc from procs
                            param_list_dict = prj.csv_parameters(scheme)
                            sim_id = prj.get_id(
                                param_list_dict[0])  # Get the ID
                            if os.path.exists(
                                    prj.dirname + "/output/" + sim_id +
                                    "/error_caribu.txt"
                            ):  # Check if the file error_caribu.txt has been generated
                                shutil.rmtree(
                                    prj.dirname + "/output/" +
                                    sim_id)  # Supress the output directory
                                ex_rep = param_list_dict[0][
                                    "rep"]  # Get the rep (random seed) used for the simulation
                                param_list_dict[0].update(
                                    rep=ex_rep + 1
                                )  # Update the sim_scheme with a new seed to re-launch the simulation
                                df = pd.DataFrame.from_dict(
                                    data=param_list_dict, orient='columns')
                                df.to_csv(
                                    path_or_buf=scheme, sep='\t', index=False
                                )  # Create the csv file sim_scheme to launch the simulation
                                p = Popen(["walter", "-i",
                                           scheme])  # Launch the simulation
                                prj.itable[sim_id] = param_list_dict[
                                    0]  # updating combi_param
                                prj.update_itable()
                                pids.append(
                                    p
                                )  # Add the new process to the list of processes for futher testing
                                procs[
                                    scheme] = p  # Add the new process to the dict of processes
                    #time.sleep(120) # To avoid testing for finished processes too often, wait 2 minutes between loops
                # all children finished: drop the temporary scheme files
                tmp.rmtree()