def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
             dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
             backup_every_rep=100, save_threshold=-np.inf,
             db_precision=np.float16, sim_timeout=None, random_state=None,
             optimization_direction='grid', algorithm_name=''):

    # Initialize the user defined setup class
    self.setup = spot_setup
    param_info = parameter.get_parameters_array(
        self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
    self.all_params = param_info['random']
    self.constant_positions = parameter.get_constant_indices(spot_setup)
    if self.constant_positions:
        self.non_constant_positions = []
        for i, val in enumerate(self.all_params):
            if self.all_params[i] not in self.constant_positions:
                self.non_constant_positions.append(i)
    else:
        self.non_constant_positions = np.arange(0, len(self.all_params))
    self.parameter = self.get_parameters
    self.parnames = param_info['name']
    self.algorithm_name = algorithm_name
    # Create a type to hold the parameter values using a namedtuple
    self.partype = parameter.ParameterSet(param_info)

    self.evaluation = self.setup.evaluation()
    self.save_sim = save_sim
    self.optimization_direction = optimization_direction
    self.dbname = dbname or 'customDb'
    self.dbformat = dbformat or 'ram'
    self.db_precision = db_precision
    self.breakpoint = breakpoint
    self.backup_every_rep = backup_every_rep
    # Two parameters to control the database handling:
    # 'dbinit' triggers the initial creation of the database file
    # 'dbappend' is used to append to an existing database after a restart
    self.dbinit = dbinit
    self.dbappend = dbappend

    # Set the random state
    if random_state is None:  # ToDo: Have to discuss if these 3 lines are necessary.
        random_state = np.random.randint(low=0, high=2**30)
    np.random.seed(random_state)

    # If sim_timeout is not None, a timeout is set so that the simulation is
    # aborted after sim_timeout seconds without returning a value
    self.sim_timeout = sim_timeout
    self.save_threshold = save_threshold

    if breakpoint == 'read' or breakpoint == 'readandwrite':
        print('Reading backupfile')
        try:
            open(self.dbname + '.break')
        except FileNotFoundError:
            print('Backupfile not found')
        self.dbappend = True

    # Now a repeater (ForEach object) is loaded.
    # A repeater is a convenient wrapper to repeat tasks; it offers the same
    # interface for sequential and for parallel tasks.
    if parallel == 'seq':
        from spotpy.parallel.sequential import ForEach
    elif parallel == 'mpi':
        from spotpy.parallel.mpi import ForEach

    # MPC is based on pathos multiprocessing and uses an ordered map, so results
    # are returned in the same order in which the parameters were submitted.
    elif parallel == 'mpc':
        from spotpy.parallel.mproc import ForEach

    # UMPC is based on pathos multiprocessing and uses an unordered map, so
    # results are returned in the order in which the subprocesses finish. This
    # may speed up the whole simulation process, but it is not recommended if
    # objective functions rely on the order of the data, because the order of
    # the results is effectively random.
    elif parallel == 'umpc':
        from spotpy.parallel.umproc import ForEach
    else:
        raise ValueError(
            "'%s' is not a valid keyword for parallel processing" % parallel)

    # This is the repeater for the model runs. The simulate method does the work.
    # If you need different tasks, the repeater can be pushed into a "phase"
    # using the setphase function. The simulate method can then check the
    # current phase and dispatch work to other functions. This is introduced
    # for sceua to differentiate between burn-in and the normal work on the
    # chains.
    self.repeat = ForEach(self.simulate)

    # The "save" method needs to know whether the objective function result is
    # a list or a float; the default is float.
    self.like_struct_typ = type(1.1)
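# -----------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original module):
# it shows how the keyword arguments accepted above are typically passed when a
# concrete sampler such as spotpy.algorithms.mc is constructed. "MinimalSetup"
# and its parameter bounds are hypothetical; the spotpy calls follow the
# library's documented setup interface.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    import spotpy

    class MinimalSetup(object):
        def parameters(self):
            # One uniform parameter named 'x', drawn from [-5, 5]
            return spotpy.parameter.generate(
                [spotpy.parameter.Uniform('x', -5, 5)])

        def simulation(self, vector):
            # Toy model: a simple parabola
            return [vector[0] ** 2]

        def evaluation(self):
            # Observed value the simulation is compared against
            return [0.0]

        def objectivefunction(self, simulation, evaluation):
            # Negative RMSE, so that larger values are better
            return -spotpy.objectivefunctions.rmse(evaluation, simulation)

    sampler = spotpy.algorithms.mc(
        MinimalSetup(), dbname='mc_example', dbformat='csv',
        parallel='seq', save_sim=True, random_state=42)
    sampler.sample(100)  # 100 repetitions, written to mc_example.csv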
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
             dbappend=False, parallel='seq', save_sim=True, alt_objfun=None,
             breakpoint=None, backup_every_rep=100, save_threshold=-np.inf,
             db_precision=np.float16, sim_timeout=None, random_state=None):

    # Initialize the user defined setup class
    self.setup = spot_setup
    # Philipp: Changed from Tobi's version; we now use both the new
    # class-defined parameters and the parameters function. The new method
    # get_parameters can deal with a missing parameters function.
    #
    # For me (Philipp) it is totally unclear why all the samplers should call
    # this function again and again instead of
    # TODO: just storing a definite list of parameter objects here
    param_info = parameter.get_parameters_array(
        self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
    self.all_params = param_info['random']
    self.constant_positions = parameter.get_constant_indices(spot_setup)
    if self.constant_positions:
        self.non_constant_positions = []
        for i, val in enumerate(self.all_params):
            if self.all_params[i] not in self.constant_positions:
                self.non_constant_positions.append(i)
    else:
        self.non_constant_positions = np.arange(0, len(self.all_params))
    self.parameter = self.get_parameters
    self.parnames = param_info['name']
    # Create a type to hold the parameter values using a namedtuple
    self.partype = parameter.ParameterSet(param_info)

    # Use alt_objfun if it names a function in objectivefunctions,
    # otherwise fall back to self.setup.objectivefunction
    self.objectivefunction = getattr(
        objectivefunctions, alt_objfun or '', None) or self.setup.objectivefunction
    self.evaluation = self.setup.evaluation()
    self.save_sim = save_sim
    self.dbname = dbname or 'customDb'
    self.dbformat = dbformat or 'ram'
    self.db_precision = db_precision
    self.breakpoint = breakpoint
    self.backup_every_rep = backup_every_rep
    # Two parameters to control the database handling:
    # 'dbinit' triggers the initial creation of the database file
    # 'dbappend' is used to append to an existing database after a restart
    self.dbinit = dbinit
    self.dbappend = dbappend

    # Set the random state
    if random_state is None:  # ToDo: Have to discuss if these 3 lines are necessary.
        random_state = np.random.randint(low=0, high=2**30)
    np.random.seed(random_state)

    # If sim_timeout is not None, a timeout is set so that the simulation is
    # aborted after sim_timeout seconds without returning a value
    self.sim_timeout = sim_timeout
    self.save_threshold = save_threshold

    if breakpoint == 'read' or breakpoint == 'readandwrite':
        print('Reading backupfile')
        self.dbappend = True
        self.breakdata = self.read_breakdata(self.dbname)

    # Now a repeater (ForEach object) is loaded.
    # A repeater is a convenient wrapper to repeat tasks; it offers the same
    # interface for sequential and for parallel tasks.
    if parallel == 'seq':
        from spotpy.parallel.sequential import ForEach
    elif parallel == 'mpi':
        from spotpy.parallel.mpi import ForEach

    # MPC is based on pathos multiprocessing and uses an ordered map, so results
    # are returned in the same order in which the parameters were submitted.
    elif parallel == 'mpc':
        from spotpy.parallel.mproc import ForEach

    # UMPC is based on pathos multiprocessing and uses an unordered map, so
    # results are returned in the order in which the subprocesses finish. This
    # may speed up the whole simulation process, but it is not recommended if
    # objective functions rely on the order of the data, because the order of
    # the results is effectively random.
    elif parallel == 'umpc':
        from spotpy.parallel.umproc import ForEach
    else:
        raise ValueError(
            "'%s' is not a valid keyword for parallel processing" % parallel)

    # This is the repeater for the model runs. The simulate method does the work.
    # If you need different tasks, the repeater can be pushed into a "phase"
    # using the setphase function. The simulate method can then check the
    # current phase and dispatch work to other functions. This is introduced
    # for sceua to differentiate between burn-in and the normal work on the
    # chains.
    self.repeat = ForEach(self.simulate)

    self.status = _RunStatistic()
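# -----------------------------------------------------------------------------
# Minimal sketch of the alt_objfun resolution used above (an assumption, not
# part of the original module): if alt_objfun names a function in
# spotpy.objectivefunctions (e.g. 'rmse'), that function is used; otherwise the
# setup's own objectivefunction is taken. "DummySetup" is hypothetical.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    from spotpy import objectivefunctions

    class DummySetup(object):
        def objectivefunction(self, simulation, evaluation):
            # Mean absolute error defined by the setup class itself
            return sum(abs(s - e) for s, e in zip(simulation, evaluation)) / len(simulation)

    setup = DummySetup()
    for alt_objfun in ('rmse', None):
        # Same expression as in __init__ above
        objfun = (getattr(objectivefunctions, alt_objfun or '', None)
                  or setup.objectivefunction)
        print(alt_objfun, '->', objfun.__name__)
    # Expected output:
    #   rmse -> rmse
    #   None -> objectivefunction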