    # Find out if the model should run parallel (for supercomputer)
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Create the spotted model
    model = Fulda_lumped(datetime.datetime(begin, 1, 1),
                         datetime.datetime(end, 12, 31),
                         with_valid_data=True,
                         shift_one_day=True)
    if 'i' in sys.argv:
        # 'i' on the command line: set the model up without sampling
        runs = 0
    elif 'v' in sys.argv:
        # 'v' on the command line: validate the best parameter set found so far
        sys.argv.remove('v')
        with open(prefix + '-best.dict') as f:
            best = eval(f.read())
        best.pop('Eff')
        model.setparameters(**best)
        model.begin = datetime.datetime(1986, 1, 1)
        model.end = datetime.datetime(1988, 12, 31)
        resQ = np.array(model.runmodel())
        model.plotvalidation(resQ)
        runs = 0
    elif len(sys.argv)>1:
        runs = int(sys.argv[1])
    if runs:
        sampler = Sampler(model, parallel=parallel)
        # sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end, 0.0)
        # Multi-objective datawriter
        sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end,
                                        simthreshold_NS=0.50,
                                        simthreshold_pbias=25.0,
                                        simthreshold_rsr=0.70,
                                        with_valid_data=model.with_valid_data,
                                        shift_one_day=model.shift_one_day)
        # Now we can sample with the implemented Latin hypercube algorithm:
        sampler.sample(runs)
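
The three simthreshold values above correspond to common goodness-of-fit measures (Nash-Sutcliffe efficiency, percent bias, and the RMSE-to-standard-deviation ratio). As a rough illustration of what such a filter checks, here is a minimal sketch using spotpy's objective functions; the keep_simulation helper and the synthetic data are illustrative and not part of the DataWriter used above.

import numpy as np
import spotpy.objectivefunctions as of

def keep_simulation(evaluation, simulation,
                    ns_min=0.50, pbias_max=25.0, rsr_max=0.70):
    """Return True if the simulation passes all three quality thresholds.

    Illustrative helper only; the actual DataWriter may apply its
    thresholds differently.
    """
    ns = of.nashsutcliffe(evaluation, simulation)   # Nash-Sutcliffe efficiency
    pb = abs(of.pbias(evaluation, simulation))      # percent bias
    rsr = of.rsr(evaluation, simulation)            # RMSE / std(obs)
    return ns >= ns_min and pb <= pbias_max and rsr <= rsr_max

# Tiny synthetic check
obs = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
sim = np.array([1.1, 1.9, 3.2, 3.8, 5.1])
print(keep_simulation(obs, sim))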
Example #2
    runs = 2

    # File names of the forcing data
    fnQ = "Q_Kammerzell_1979_1999.txt"
    fnT = "T_kammerzell_1979_1999_max_min_avg.txt"
    fnP = "P_Krigavg_kammerzell_1979_1999.txt"

    # import algorithm
    from spotpy.algorithms import mc as Sampler

    # Find out if the model should run parallel (for supercomputer)
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Create the model
    model = ComplexLumped(datetime.datetime(begin, 1, 1),
                          datetime.datetime(end, 12, 31))
    print(cmf.describe(model.project))
    # If there is a command line argument, take its value for the number of
    # runs
    if len(sys.argv) > 1:
        runs = int(sys.argv[1])

    # run the model
    if runs:
        sampler = Sampler(model,
                          parallel=parallel,
                          dbname="complex_lumped",
                          dbformat="csv",
                          save_sim=True)
        sampler.sample(runs)  #, subsets = 30)
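
Once a run like the one above has finished, the CSV database it wrote can be inspected with spotpy's analyser module. A minimal sketch, assuming the sampling above produced complex_lumped.csv in the working directory:

import spotpy.analyser as analyser

# Load the CSV database written by the sampler above
results = analyser.load_csv_results("complex_lumped")

# Report the parameter set with the best objective value
best = analyser.get_best_parameterset(results)
print(best)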
Example #3
                         datetime.datetime(end, 12, 31),
                         with_valid_data=True,
                         shift_one_day=True)
    if 'i' in sys.argv:
        # 'i' on the command line: set the model up without sampling
        runs = 0
    elif 'v' in sys.argv:
        # 'v' on the command line: validate the best parameter set found so far
        sys.argv.remove('v')
        with open(prefix + '-best.dict') as f:
            best = eval(f.read())
        best.pop('Eff')
        model.setparameters(**best)
        model.begin = datetime.datetime(1986, 1, 1)
        model.end = datetime.datetime(1988, 12, 31)
        resQ = np.array(model.runmodel())
        model.plotvalidation(resQ)
        runs = 0
    elif len(sys.argv) > 1:
        runs = int(sys.argv[1])
    if runs:
        sampler = Sampler(model,
                          parallel=parallel,
                          dbformat="csv",
                          dbname="test")
        # sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end, 0.0)
        # Multi-objective datawriter:
        # sampler.datawriter = DataWriter(prefix, model.params, model.begin, model.end,
        #                                 simthreshold_NS=0.50,
        #                                 simthreshold_pbias=25.0,
        #                                 simthreshold_rsr=0.70,
        #                                 with_valid_data=model.with_valid_data,
        #                                 shift_one_day=model.shift_one_day)
        # Now we can sample with the implemented Latin hypercube algorithm:
        sampler.sample(runs)
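
The validation branch above rebuilds the parameter dictionary with eval. If the '-best.dict' file contains nothing but a plain dict literal, ast.literal_eval from the standard library is a safer drop-in. A sketch (load_best_parameters is an illustrative name, not part of the scripts above):

import ast

def load_best_parameters(prefix):
    """Read '<prefix>-best.dict' and return its contents as a dict."""
    with open(prefix + '-best.dict') as f:
        best = ast.literal_eval(f.read())
    # Drop the stored efficiency entry, as the scripts above do
    best.pop('Eff', None)
    return best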
Example #4

if __name__ == '__main__':
    # Get sampler
    from spotpy.algorithms import lhs as Sampler

    # Check if we are running on a supercomputer or locally
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Run the models
    runs = 5
    num_cells = [1, 2, 4, 8]
    results = {}
    for num in num_cells:
        # Create the model
        model = ScalingTester(num_cells=num)
        print(cmf.describe(model.project))
        # Create the sampler
        sampler = Sampler(model,
                          parallel=parallel,
                          dbname=model.dbname,
                          dbformat='csv',
                          save_sim=False)

        sampler.sample(runs)
        results[str(num)] = sampler.status.objectivefunction

    for key, value in results.items():
        print("The model with {} cell(s) has a best NS of {}".format(
            key, value))
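
The results dictionary collected above lends itself to a quick visual comparison of the scaling behaviour. A minimal sketch with matplotlib, assuming results holds the best NS value per cell count as filled in the loop above:

import matplotlib.pyplot as plt

cells = list(results.keys())
best_ns = [results[c] for c in cells]

plt.bar(cells, best_ns)
plt.xlabel('Number of cells')
plt.ylabel('Best Nash-Sutcliffe efficiency')
plt.title('Scaling test: best NS per model size')
plt.show()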
Example #5

    # File names of the forcing data
    fnQ = "Q_Kammerzell_1979_1999.txt"
    fnT = "T_kammerzell_1979_1999_max_min_avg.txt"
    fnP = "P_Krigavg_kammerzell_1979_1999.txt"

    # import algorithm
    from spotpy.algorithms import rope as Sampler

    # Find out if the model should run parallel (for supercomputer)
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Create the model
    model = SimpleLumped(datetime.datetime(begin, 1, 1),
                         datetime.datetime(end, 12, 31))

    # If there is a command line argument, take its value for the number of
    # runs
    if len(sys.argv) > 1:
        runs = int(sys.argv[1])

    # run the model
    if runs:
        sampler = Sampler(model,
                          parallel=parallel,
                          dbname="simple_lumped_hargreaves",
                          dbformat="csv",
                          save_sim=True,
                          save_threshold=[0.0, 0.0])
        sampler.sample(runs, subsets=30)
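
save_threshold=[0.0, 0.0] implies that the setup returns two objective values per run and that only simulations exceeding both thresholds are stored. A minimal sketch of what such a pair of objective values could look like (illustrative only; the real SimpleLumped.objectivefunction is not shown in this excerpt):

import spotpy.objectivefunctions as of

def two_objectives(evaluation, simulation):
    """Two goodness-of-fit values, matching save_threshold=[0.0, 0.0] above."""
    ns = of.nashsutcliffe(evaluation, simulation)         # overall fit
    logns = of.lognashsutcliffe(evaluation, simulation)   # emphasis on low flows
    return [ns, logns]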
Example #6
    fnP = "P_Krigavg_kammerzell_1979_1999.txt"
    fnSun = "sunshine_hours_mw_fulda_wasserkuppe_1979_1989.txt"
    fnWind = "windspeed_m_s_mw_fulda_wasserkuppe_1979_1989.txt"
    fnVapor = "vapor_pressure_kpa_mw_fulda_wasserkuppe_1979_1989.txt"
    fnRelHum = "rel_hum_percent_mw_fulda_wasserkuppe_1979_1989.txt"

    # import algorithm
    from spotpy.algorithms import rope as Sampler

    # Find out if the model should run parallel (for supercomputer)
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Create the model
    model = ComplexLumped(datetime.datetime(begin, 1, 1),
                          datetime.datetime(end, 12, 31))

    # If there is a command line argument, take its value for the number of
    # runs
    if len(sys.argv) > 1:
        runs = int(sys.argv[1])

    # run the model
    if runs:
        sampler = Sampler(model,
                          parallel=parallel,
                          dbname="complex_lumped_penman",
                          dbformat="csv",
                          save_sim=True,
                          save_threshold=[0.0, 0.0])
        sampler.sample(runs, subsets=30)
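
Examples #5 and #6 appear to differ mainly in the evapotranspiration forcing (Hargreaves vs. Penman-Monteith, the latter needing the extra sunshine, wind, vapor pressure and humidity files). After both ROPE runs have finished, their CSV databases can be compared directly; a sketch, assuming both files exist and that the first objective column is named like1 as in spotpy's CSV databases:

import spotpy.analyser as analyser

for dbname in ('simple_lumped_hargreaves', 'complex_lumped_penman'):
    results = analyser.load_csv_results(dbname)
    # Report the highest value of the first objective column
    print('{}: best objective value = {:0.3f}'.format(dbname, results['like1'].max()))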
Example #7
# http://stackoverflow.com/questions/419163/what-does-if-name-main-do
if __name__ == '__main__':

    # Get the Monte-Carlo sampler
    from spotpy.algorithms import mc as Sampler


    # Create the model
    model = SingleStorage(datetime.datetime(1980, 1, 1),
                          datetime.datetime(1985, 12, 31))

    runs = get_runs(default=1)
    # Create the sampler
    sampler = Sampler(model,
                      parallel=parallel(),
                      dbname=model.dbname, dbformat='csv',
                      save_sim=True)

    # Print our configuration
    print(spotpy.describe.describe(sampler))

    # Do the sampling
    if runs > 1:
        # Now we can sample with the implemented Monte Carlo algorithm:
        sampler.sample(runs)

    else:
        # Single run: execute the model once and report all objective functions
        result = model.simulation(verbose=True)
        for name, value in spotpy.objectivefunctions.calculate_all_functions(model.evaluation(), result):
            try:
                print('{0}: {1:0.4g}'.format(name, value))
            except (TypeError, ValueError):
                print('{0}: {1}'.format(name, value))
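
get_runs() and parallel() are helper functions that are not shown in this excerpt. Based on the pattern of the other examples, plausible implementations could look like this (a sketch, not the original helpers):

import os
import sys

def parallel():
    """Return 'mpi' when started with mpirun (OMPI_COMM_WORLD_SIZE is set), else 'seq'."""
    return 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

def get_runs(default=1):
    """Take the number of runs from the first command line argument, if present."""
    if len(sys.argv) > 1:
        return int(sys.argv[1])
    return default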
Example #8

        plt.plot(x, self.evaluation(), 'k')
        plt.show()


#%%

# http://stackoverflow.com/questions/419163/what-does-if-name-main-do
if __name__ == '__main__':
    # Import the algorithm
    from spotpy.algorithms import lhs as Sampler

    # Find out if the model should run parallel (for supercomputer)
    parallel = 'mpi' if 'OMPI_COMM_WORLD_SIZE' in os.environ else 'seq'

    # Create the spotted model
    model = SingleStorage(datetime.datetime(begin, 1, 1),
                          datetime.datetime(end, 12, 31))
    if runs:
        sampler = Sampler(model,
                          parallel=parallel,
                          dbname='lhs-1stor',
                          dbformat='csv',
                          save_sim=True)
        # sampler.datawriter = DataWriter(model.params, model.begin, model.end, 0.0)
        # Now we can sample with the implemented Latin hypercube algorithm:
        sampler.sample(runs)

    # Plot the result
    # TODO: Adjust the threshold for the grey area in the plot. Your models will hopefully achieve much more...
#  model.plotsimulation(0.3)
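
The TODO above asks for an adjusted threshold for the grey area in the plot. One way to check how many runs clear a given threshold is to filter the LHS results by their objective value; a sketch, assuming the run above wrote lhs-1stor.csv and that the first objective column is named like1 as in spotpy's CSV databases:

import spotpy.analyser as analyser

results = analyser.load_csv_results('lhs-1stor')

# Count the runs whose first objective value exceeds the placeholder
# threshold of 0.3 used in the commented-out call above
threshold = 0.3
good_runs = results[results['like1'] > threshold]
print('{} of {} runs exceed {}'.format(len(good_runs), len(results), threshold))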