Example #1
def main():
    filename = './data/hashcode.in'
    print('Reading simulation...')

    sim = Simulation(filename)

    street_ids_with_cars = set()
    for car in sim.cars:
        street_ids_with_cars.update([s.id for s in car.path[:-1]])

    for isect in sim.intersections:
        isect.set_schedule(
            [(s, 1) for s in isect.queues.keys() if s in street_ids_with_cars],
            sim.duration)

    time_beg = datetime.now()

    score, arrivals = sim.simulate()

    time_end = datetime.now()
    print(time_end - time_beg)

    print(f'score: {score}')

    sim.write_schedule('./data/submission.csv')
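
The schedule built above simply gives every incoming street that appears on some car's route one second of green. A hypothetical variant (not part of the listing; it reuses only the sim, street_ids_with_cars and set_schedule() API shown above) would weight the green time by how many cars actually use each street:

# Hypothetical weighting sketch; Counter tallies how many car routes use each street.
from collections import Counter

street_usage = Counter()
for car in sim.cars:
    street_usage.update(s.id for s in car.path[:-1])

for isect in sim.intersections:
    schedule = [(s, max(1, street_usage[s] // 10))
                for s in isect.queues.keys() if s in street_ids_with_cars]
    if schedule:
        isect.set_schedule(schedule, sim.duration)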
Example #2
def run(probability, samples=1, turning=False, fname=None):
    # Run the simulation once per sample
    average_collisions = []
    for i, sample in enumerate(range(samples)):
        print '\tSim {} of {}'.format(i + 1, samples)
        sim = Simulation(probability, turning=turning)
        sim.run(animate=False, fname=fname)
        average_collisions.append(sim.average_collisions())
    return float(sum(average_collisions)) / len(average_collisions)
Example #3
def run(netdef,tosave,modify,procs,thisProc,stims,param,repeats,sim_time,SaveSpikes,SaveVoltage,SaveConductance,SaveCurrent):
    net = netdef()

    if SaveVoltage:
        net.recordVoltage()

    repeats = int(repeats)
    # Randomseed was 200 for most figures
    # Changed to 200 for rat
    # Changed to 200 for anurans
    s = Simulation(net, randomseed=202,delay=25)
    s.verbose = False
    s.sim_time = sim_time
    s.dt = 0.050
    total = len(stims)*len(param)*repeats
    spp = ceil(float(total)/procs)
    start = thisProc*spp
    end = (thisProc+1)*spp
    count = 0
    for a in param: 
        s.set_amplitude(net.sim_amp)
        for d in stims*repeats:
            if count >= start and count < end:
                net = modify(net,a,d)
                progress.update(count-start,spp,thisProc)
                s.stim_dur = d 
                s.run()
                key = [a,d] 
                net.savecells(tosave, key, spikes=SaveSpikes,voltage=SaveVoltage,conductance=SaveConductance,current=SaveCurrent)
            count += 1
    progress.update(spp,spp,thisProc)

    r = [thisProc,net.savedparams,net.savedcells]
    return r
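
run() above splits the total work items (len(stims)*len(param)*repeats combinations) into procs contiguous blocks and lets process thisProc handle the indices start <= count < end. A small self-contained sketch of that partitioning arithmetic (the counts are made up):

# Sketch of the per-process block split used above.
from math import ceil

total, procs = 100, 8
spp = int(ceil(float(total) / procs))        # simulations per process
blocks = [(p * spp, min((p + 1) * spp, total)) for p in range(procs)]
print(blocks)   # process p only runs work items whose index falls in blocks[p]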
Example #4
def main():
    # Read arguments
    args = parser().parse_args()
    seed(args.seed)
    # Run once
    if not args.run_all:
        sim = Simulation(args.probability, turning=args.turning)
        sim.run(animate=args.animate, fname=args.output_file)
        print 'collisions = {}'.format(sim.average_collisions())
    # Run all
    else:
        probabilities = np.linspace(0.04, 0.2, 17)
        run_all(probabilities, turning=args.turning, fname=args.output_file)
Example #5
def Main (args):
  show_converge = True
  if len(args) != 10:
    print >>sys.stderr, "Usage: perturbation_constant.py setup stable_time mean_recovery end_time " + \
                                " begin_mean_perturb end_mean_perturn step_mean_perturb sampling_rate seed bootstrap"
  else:
    topo = open(args[0]).read()
    stable = float(args[1])
    mean_recovery = float(args[2])
    end_time = float(args[3])
    begin = float(args[4])
    end = float(args[5])
    step = float(args[6])
    sampling_rate = float(args[7])
    seed = int(args[8])
    bootstrap = (args[9].lower() == "true")
    print "Setting %s %f %f %f %f %f %f %f %d"%(args[0], stable, mean_recovery, end_time, begin, end,\
            step, sampling_rate, seed)
    topo_yaml = yaml.load(topo)

    # If no fail links then just use links
    links = topo_yaml['links']

    if 'fail_links' in topo_yaml:
      fail_links = topo_yaml['fail_links']
    else:
      fail_links = links

    for mean in np.arange(begin, end, step):
      Singleton.clear()
      sim = Simulation()
      sim.check_always = False
      random.seed(seed)
      print "mean_perturb %f"%(mean)
      print "generating trace"
      (end_time, new_trace) = TransformTrace(links, fail_links, mean, mean_recovery, stable, end_time, bootstrap)
      print "done generating trace"
      print "TRACE TRACE TRACE"
      for t in new_trace:
        print t
      print "TRACE TRACE TRACE"
      sim.Setup(topo, new_trace, False)
      for time in np.arange(stable, end_time, sampling_rate):
        sim.scheduleCheck(time)
      ## Measure latency less often
      #for time in np.arange(stable, end_time, sampling_rate):
        #for (ha, hb) in permutations(sim.hosts, 2):
          #sim.scheduleSend(time, ha.name, ha.address, hb.address)
      sim.Run()
      sim.Report(show_converge)
      sim.Clear()
Example #6
def uploadData(setData):
    try:
        mySimulation = Simulation()
        mySimulation.setUp(dataUploadStorage[session["user_id"]]["data"])
        mySimulation.runNewSim()
    except Exception:
        #print("Simulation failed.")
        pass
    thesePaths = mySimulation.getAllPaths()
    theseHeatMaps = mySimulation.getAllHeatData()
    thisFileName = str(setData[0])
    rows = str(setData[1])
    cols = str(setData[2])
    getLocations = parseLocations(rows, cols, setData[3])
    getSubjectMap = parseSubjectMap(setData[4])
    db = connectToDB()
    cur = db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    cur.execute(
        "INSERT INTO datasets (datasetname, userid, heatdata, vectordata, locationmap, subjectmap) VALUES (%s, %s, %s, %s, %s, %s)",
        (thisFileName, 1, json.dumps(theseHeatMaps), json.dumps(thesePaths),
         json.dumps(getLocations), json.dumps(getSubjectMap)))
    db.commit()
    cur.close()
    db.close()
    session["currentlyUploading"] = False
    del dataUploadStorage[session["user_id"]]
    session["uploadingFileName"] = ""
    emit('finishedUploading')
Example #7
File: main.py Project: Drvanon/stars
def init(screen_size):
    screen = init_screen(screen_size)

    images = {}
    images['star0'] = pygame.image.load('img/star0.png')
    images['star0'].set_colorkey((0, 0, 0))
    images['star1'] = pygame.image.load('img/star0.png')
    images['star1'].set_colorkey((0, 0, 0))

    images['star0yellow'] = colored_copy(images['star0'], (0, 255, 255, 0))

    sim = Simulation()
    sim.draw_stars(images, screen)

    return screen, images, sim
Example #8
def run_ratio(ratio, radii, tracking, coverage=0.1, T=120, dt=1, fname=None):
    '''Runs the specified target-robot ratio for the specified environment radii'''
    # Run simulation for each radius
    observations = []
    for i, R in enumerate(radii):
        m_max, n_max = 10, 20
        if ratio <= 1:
            m = m_max
            n = int(ratio * m_max)
        else:
            n = n_max
            m = int(n_max / ratio)
        print '\tSim {} of {}: n = {}, m = {}'.format(i + 1, len(radii), n, m)
        sim = Simulation(m, n, T, dt, R, tracking=tracking)
        sim.run(vis='', fname=fname)
        observations.append(sim.average_observations(normalize=True))
    return radii, observations
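
A minimal call sketch (the ratio and radii values are assumptions, not taken from the listing): run_ratio() pins either the robot or the target count at its maximum, scales the other to match the requested ratio, and returns one normalized observation average per environment radius.

# Hypothetical usage of run_ratio() as defined above.
radii = [5.0, 10.0, 20.0]
radii, observations = run_ratio(0.5, radii, tracking=True, T=120, dt=1)
for R, obs in zip(radii, observations):
    print('R = {}: average observations = {}'.format(R, obs))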
Example #9
    def quick_match(self, teams):
        home = teams[0]
        away = teams[1]
        print('Selected teams: \n {} vs {}'.format(home.name, away.name))
        input('press enter to continue')
        result = Simulation.simulate(teams)
        print(' {} vs {}\n {} - {}'.format(result[0].name, result[1].name,
                                           result[0].result, result[1].result))
Example #10
def main():

    # Create first generation
    pool = Pop(size_gen)

    # Step through generations
    for _ in range(num_gen):

        # initialize data histogram for sim visualization
        hist = Data()
        terrain = Terrain(400, 4)

        # create pybox2d dynamic bodies based on individual's gene
        gen = [Biped(pool.population[i].name, pool.population[i].gene)
               for i in range(size_gen)]

        for k, biped in enumerate(gen):

            # specify terrain and biped to race
            race = Sim(terrain, biped)

            if not hist.terrain:
                hist.set_terrain(terrain)

            # run simulation without visualization
            score, bb, time = race.run(1e4)

            # store sim data and fitness evaluation
            pool.population[k].fitness = score
            hist.timelines[biped.name] = race.history.timelines['timeline']

        # resort gene pool
        pool.population = list(
            sorted(pool.population, key=lambda x: x.fitness))

        # visualize top bipeds' simulations
        shown = pool.population[-num_shown:]
        timelines = [s.name for s in shown]
        view.start()
        view.run(hist, timelines, speed=3)

        # evolve gene pool
        pool.evolve()
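
The loop above depends on Pop.evolve() to produce the next generation from the fitness-sorted pool. A hypothetical sketch of such a step (truncation selection with uniform crossover and mutation; this is not the project's actual implementation, and Individual and mutate() are assumed names):

import random

def evolve_sketch(population, keep_ratio=0.5, mutation_rate=0.1):
    # population is assumed sorted by fitness, worst first (as in main() above)
    survivors = population[-int(len(population) * keep_ratio):]
    children = []
    while len(survivors) + len(children) < len(population):
        parent_a, parent_b = random.sample(survivors, 2)
        # uniform crossover over the two parent genes
        gene = [random.choice(pair) for pair in zip(parent_a.gene, parent_b.gene)]
        # mutate a fraction of the entries (mutate() is an assumed helper)
        gene = [mutate(g) if random.random() < mutation_rate else g for g in gene]
        children.append(Individual(gene))
    return survivors + children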
Example #11
def main():
    # Read arguments
    args = parser().parse_args()
    seed(args.seed)
    # Run once
    if not args.run_all:
        sim = Simulation(args.m,
                         args.n,
                         args.t,
                         args.dt,
                         args.r,
                         tracking=args.tracking)
        sim.run(vis=args.visualization, fname=args.output_file)
        print 'observations = {}'.format(
            sim.average_observations(normalize=True))
    # Run ratios
    else:
        ratios = [1 / 5., 1 / 2., 1, 4, 10]
        run_ratios(ratios, tracking=args.tracking, fname=args.output_file)
Example #12
def Main (args):
  show_converge = True
  if len(args) != 7:
    print >>sys.stderr, "Usage: perturbation.py setup trace stable_time begin_mean_perturb end_mean_perturb step_mean_perturb seed"
  else:
    topo = open(args[0]).read()
    trace = open(args[1]).readlines()
    stable = float(args[2])
    begin = float(args[3])
    end = float(args[4])
    step = float(args[5])
    seed = int(args[6])
    print "Setting %s %s %f %f %f %f %d"%(args[0], args[1], stable, begin, end, step, seed)
    for mean in np.arange(begin, end, step):
      Singleton.clear()
      sim = Simulation()
      sim.check_always = False
      random.seed(seed)
      print "mean_perturb %f"%(mean)
      (end_time, new_trace) = TransformTrace(trace, mean, stable)
      sim.Setup(topo, new_trace, True)
      for time in np.arange(stable, end_time, 0.5 * mean):
        sim.scheduleCheck(time)
      # Measure latency less often
      for time in np.arange(stable, end_time, mean):
        for (ha, hb) in permutations(sim.hosts, 2):
          sim.scheduleSend(time, ha.name, ha.address, hb.address)
      sim.Run()
      sim.Report(show_converge)
      sim.Clear()
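
The latency probes above are scheduled for every ordered pair of hosts; itertools.permutations with r=2 yields exactly those ordered pairs, so both directions of each host pair are measured:

# Quick illustration of the host pairing used by the scheduleSend() loop.
from itertools import permutations

hosts = ['h1', 'h2', 'h3']
print(list(permutations(hosts, 2)))
# [('h1', 'h2'), ('h1', 'h3'), ('h2', 'h1'), ('h2', 'h3'), ('h3', 'h1'), ('h3', 'h2')]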
Example #13
File: doe.py Project: MannyKayy/casiopeia
    def _set_optimization_variables_initials(self, qinit, x0, uinit):

        self.simulation = Simulation(self._discretization.system, \
            self._pdata, qinit)
        self.simulation.run_system_simulation(x0, \
            self._discretization.time_points, uinit, print_status = False)
        xinit = self.simulation.simulation_results

        repretitions_xinit = \
            self._discretization.optimization_variables["X"][:,:-1].shape[1] / \
                self._discretization.number_of_intervals
        
        Xinit = ci.repmat(xinit[:, :-1], repretitions_xinit, 1)

        Xinit = ci.horzcat([ \

            Xinit.reshape((self._discretization.system.nx, \
                Xinit.numel() / self._discretization.system.nx)),
            xinit[:, -1],

            ])

        uinit = inputchecks.check_controls_data(uinit, \
            self._discretization.system.nu, \
            self._discretization.number_of_intervals)
        Uinit = uinit

        qinit = inputchecks.check_constant_controls_data(qinit, \
            self._discretization.system.nq)
        Qinit = qinit

        self._optimization_variables_initials = ci.veccat([ \

                Uinit,
                Qinit,
                Xinit,

            ])
Example #14
    def next_game(self):
        if self.fixtures != []:
            for game in self.fixtures[0]:
                home = Club(game[0])
                away = Club(game[1])
                if home.name == self.club.name or away.name == self.club.name:
                    print(game)
                    input('press enter to continue')
                teams = [home, away]
                result = Simulation.simulate(teams)
                self.competition._update_standings(result)
                self.played.append([(result[0].name, result[1].name),
                                    (result[0].result, result[1].result)])
            self.fixtures.pop(0)
        else:
            print('End of the season')

        self.menu()
Example #15
def sim_process(sim_queue, client_queue):
    sim = Simulation('../model/Cirship.fmu')
    print('Simulation ready')
    id = 0

    watches = {
        'real': set(),
        'bool': set(),
    }

    while True:
        while not client_queue.empty():
            msg = client_queue.get_nowait()
            if msg['type'] == 'set':
                ref = msg['ref']
                value = msg['value']

                if isinstance(value, bool):
                    sim.set_bool(ref, value)
                else:
                    sim.set_real(ref, value)
            elif msg['type'] == 'watch':
                for ref in msg.get('real', []):
                    watches['real'].add(ref)
                for ref in msg.get('bool', []):
                    watches['bool'].add(ref)

        t = sim.update()
        id += 1

        reals = {ref: sim.get_real(ref) for ref in watches['real']}
        bools = {ref: sim.get_bool(ref) for ref in watches['bool']}

        refs = {'id': id, 'time': t, **reals, **bools}

        sim_queue.put(refs)
        time.sleep(0.05)
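
sim_process() above drains client_queue for 'set' and 'watch' messages and publishes a flat dict of watched values on sim_queue roughly every 50 ms. A minimal client-side sketch (the variable names are invented and both queues are assumed to be multiprocessing.Queue objects; the FMU path must exist for the worker to start):

from multiprocessing import Process, Queue

sim_queue, client_queue = Queue(), Queue()
worker = Process(target=sim_process, args=(sim_queue, client_queue))
worker.start()

# Ask the simulation to report two (hypothetical) variables, then change one of them.
client_queue.put({'type': 'watch', 'real': ['engine.rpm'], 'bool': ['engine.running']})
client_queue.put({'type': 'set', 'ref': 'engine.running', 'value': True})

for _ in range(10):
    sample = sim_queue.get()   # e.g. {'id': 1, 'time': 0.05, 'engine.rpm': ..., ...}
    print(sample['id'], sample['time'])

worker.terminate()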
Example #16
File: doe.py Project: adbuerger/casiopeia
    def _set_optimization_variables_initials(self, qinit, x0, uinit):

        self.simulation = Simulation(self._discretization.system, \
            self._pdata, qinit)
        self.simulation.run_system_simulation(x0, \
            self._discretization.time_points, uinit, print_status = False)
        xinit = self.simulation.simulation_results

        repretitions_xinit = \
            self._discretization.optimization_variables["X"][:,:-1].shape[1] / \
                self._discretization.number_of_intervals
        
        Xinit = ci.repmat(xinit[:, :-1], repretitions_xinit, 1)

        Xinit = ci.horzcat([ \

            Xinit.reshape((self._discretization.system.nx, \
                Xinit.size() / self._discretization.system.nx)),
            xinit[:, -1],

            ])

        uinit = inputchecks.check_controls_data(uinit, \
            self._discretization.system.nu, \
            self._discretization.number_of_intervals)
        Uinit = uinit

        qinit = inputchecks.check_constant_controls_data(qinit, \
            self._discretization.system.nq)
        Qinit = qinit

        self._optimization_variables_initials = ci.veccat([ \

                Uinit,
                Qinit,
                Xinit,

            ])
Example #17
def simulate(ind):
    return Simulation().simulate(ind)
Example #18
File: doe.py Project: adbuerger/casiopeia
class DoE(DoEProblem):

    '''The class :class:`casiopeia.doe.DoE` is used to set up
    Design-of-Experiments-problems for systems defined with the
    :class:`casiopeia.system.System` class.

    The aim of the experimental design optimization is to identify a set of
    controls that can be used for the generation of measurement data which
    allows for a better estimation of the unknown parameters of a system.

    To achieve this, an information function on the covariance matrix of the
    estimated parameters is minimized. The values of the estimated parameters,
    which are usually only an initial guess, are not changed during the
    optimization.

    Optimum experimental design and parameter estimation methods can be used
    interchangeably until a desired accuracy of the parameters has been
    achieved.
    '''

    @property
    def optimized_controls(self):

        return self.design_results["x"][ \
            :(self._discretization.number_of_intervals * \
                self._discretization.system.nu)]


    def _discretize_system(self, system, time_points, discretization_method, \
        **kwargs):

        if system.nx == 0:

            self._discretization = NoDiscretization(system, time_points)

        elif system.nx != 0:

            if discretization_method == "collocation":

                self._discretization = ODECollocation( \
                    system, time_points, **kwargs)

            elif discretization_method == "multiple_shooting":

                self._discretization = ODEMultipleShooting( \
                    system, time_points, **kwargs)

            else:

                raise NotImplementedError('''
Unknown discretization method: {0}.
Possible values are "collocation" and "multiple_shooting".
'''.format(str(discretization_method)))
      

    def _set_parameter_guess(self, pdata):

        self._pdata = inputchecks.check_parameter_data(pdata, \
            self._discretization.system.np)


    def _apply_parameters_to_equality_constraints(self):

        optimization_variables_for_equality_constraints = ci.veccat([ \

                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"], 
                self._discretization.optimization_variables["EPS_U"], 
                self._discretization.optimization_variables["P"], 

            ])

        optimization_variables_parameters_applied = ci.veccat([ \

                self._discretization.optimization_variables["U"], 
                self._discretization.optimization_variables["Q"], 
                self._discretization.optimization_variables["X"], 
                ci.mx(*self._discretization.optimization_variables["EPS_U"].shape), 
                self._pdata, 

            ])

        equality_constraints_fcn = ci.mx_function( \
            "equality_constraints_fcn", \
            [optimization_variables_for_equality_constraints], \
            [self._discretization.equality_constraints])

        [self._equality_constraints_parameters_applied] = \
            equality_constraints_fcn([optimization_variables_parameters_applied])


    def _apply_parameters_to_discretization(self):

        self._apply_parameters_to_equality_constraints()


    def _set_optimization_variables(self):

        self._optimization_variables = ci.veccat([ \

                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],

            ])


    def _set_optimization_variables_initials(self, qinit, x0, uinit):

        self.simulation = Simulation(self._discretization.system, \
            self._pdata, qinit)
        self.simulation.run_system_simulation(x0, \
            self._discretization.time_points, uinit, print_status = False)
        xinit = self.simulation.simulation_results

        repretitions_xinit = \
            self._discretization.optimization_variables["X"][:,:-1].shape[1] / \
                self._discretization.number_of_intervals
        
        Xinit = ci.repmat(xinit[:, :-1], repretitions_xinit, 1)

        Xinit = ci.horzcat([ \

            Xinit.reshape((self._discretization.system.nx, \
                Xinit.size() / self._discretization.system.nx)),
            xinit[:, -1],

            ])

        uinit = inputchecks.check_controls_data(uinit, \
            self._discretization.system.nu, \
            self._discretization.number_of_intervals)
        Uinit = uinit

        qinit = inputchecks.check_constant_controls_data(qinit, \
            self._discretization.system.nq)
        Qinit = qinit

        self._optimization_variables_initials = ci.veccat([ \

                Uinit,
                Qinit,
                Xinit,

            ])


    def _set_optimization_variables_lower_bounds(self, umin, qmin, xmin, x0):

        umin_user_provided = umin

        umin = inputchecks.check_controls_data(umin, \
            self._discretization.system.nu, 1)

        if umin_user_provided is None:

            umin = -np.inf * np.ones(umin.shape)

        Umin = ci.repmat(umin, 1, \
            self._discretization.optimization_variables["U"].shape[1])


        qmin_user_provided = qmin

        qmin = inputchecks.check_constant_controls_data(qmin, \
            self._discretization.system.nq)

        if qmin_user_provided is None:

            qmin = -np.inf * np.ones(qmin.shape)

        Qmin = qmin


        xmin_user_provided = xmin

        xmin = inputchecks.check_states_data(xmin, \
            self._discretization.system.nx, 0)

        if xmin_user_provided is None:

            xmin = -np.inf * np.ones(xmin.shape)

        Xmin = ci.repmat(xmin, 1, \
            self._discretization.optimization_variables["X"].shape[1])

        Xmin[:,0] = x0


        self._optimization_variables_lower_bounds = ci.veccat([ \

                Umin,
                Qmin,
                Xmin,

            ])


    def _set_optimization_variables_upper_bounds(self, umax, qmax, xmax, x0):

        umax_user_provided = umax

        umax = inputchecks.check_controls_data(umax, \
            self._discretization.system.nu, 1)

        if umax_user_provided is None:

            umax = np.inf * np.ones(umax.shape)

        Umax = ci.repmat(umax, 1, \
            self._discretization.optimization_variables["U"].shape[1])


        qmax_user_provided = qmax

        qmax = inputchecks.check_constant_controls_data(qmax, \
            self._discretization.system.nq)

        if qmax_user_provided is None:

            qmax = np.inf * np.ones(qmax.shape)

        Qmax = qmax


        xmax_user_provided = xmax

        xmax = inputchecks.check_states_data(xmax, \
            self._discretization.system.nx, 0)

        if xmax_user_provided is None:

            xmax = np.inf * np.ones(xmax.shape)

        Xmax = ci.repmat(xmax, 1, \
            self._discretization.optimization_variables["X"].shape[1])

        Xmax[:,0] = x0


        self._optimization_variables_upper_bounds = ci.veccat([ \

                Umax,
                Xmax,

            ])


    def _set_measurement_data(self):

        # The DOE problem does not depend on actual measurement values,
        # the measurement deviations are only needed to set up the objective;
        # therefore, dummy-values for the measurements can be used
        # (see issue #7 for further information)

        measurement_data = np.zeros((self._discretization.system.nphi, \
            self._discretization.number_of_intervals + 1))

        self._measurement_data_vectorized = ci.vec(measurement_data)


    def _set_weightings(self, wv, weps_u):

        input_error_weightings = \
            inputchecks.check_input_error_weightings(weps_u, \
            self._discretization.system.neps_u, 
            self._discretization.number_of_intervals)

        measurement_weightings = \
            inputchecks.check_measurement_weightings(wv, \
            self._discretization.system.nphi, \
            self._discretization.number_of_intervals + 1)

        self._weightings_vectorized = ci.veccat([ \

            input_error_weightings, 
            measurement_weightings,

            ])


    def _set_measurement_deviations(self):

        self._measurement_deviations = ci.vertcat([ \

                ci.vec(self._discretization.measurements) - \
                self._measurement_data_vectorized + \
                ci.vec(self._discretization.optimization_variables["V"])

            ])


    def _setup_constraints(self):

        self._constraints = ci.vertcat([ \

                self._measurement_deviations,
                self._discretization.equality_constraints,

            ])


    def _set_cov_matrix_derivative_directions(self):

        # These correspond to the optimization variables of the parameter
        # estimation problem; the evaluation of the covariance matrix, though,
        # does not depend on the actual values of V, EPS_E and EPS_U, and with
        # this, the DoE problem does not

        self._cov_matrix_derivative_directions = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["EPS_U"],
                self._discretization.optimization_variables["V"],

            ])


    def _setup_gauss_newton_lagrangian_hessian(self):

        gauss_newton_lagrangian_hessian_diag = ci.vertcat([ \
            ci.mx(self._cov_matrix_derivative_directions.shape[0] - \
                self._weightings_vectorized.shape[0], 1), \
            self._weightings_vectorized])

        self._gauss_newton_lagrangian_hessian = ci.diag( \
            gauss_newton_lagrangian_hessian_diag)


    def _setup_covariance_matrix_for_evaluation(self):

        covariance_matrix_free_variables = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["EPS_U"],

            ])

        self._covariance_matrix_fcn = ci.mx_function("covariance_matrix_fcn", \
            [covariance_matrix_free_variables], \
            [self._covariance_matrix.covariance_matrix])


    def _apply_parameters_to_objective(self):

        # As mentioned above, the objective does not depend on the actual
        # values of V, but on the values of P and EPS_E and EPS_U, while
        # P is fed from pdata, and EPS_E, EPS_u are supposed to be 0

        objective_free_variables = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["EPS_U"],

            ])

        objective_free_variables_parameters_applied = ci.veccat([ \

                self._pdata,
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                ci.mx(*self._discretization.optimization_variables["EPS_U"].shape),

            ])

        objective_fcn = ci.mx_function("objective_fcn", \
            [objective_free_variables], [self._objective_parameters_free])

        [self._objective] = objective_fcn( \
            [objective_free_variables_parameters_applied])


    def __init__(self, system, time_points, \
        uinit = None, umin = None, umax = None, \
        qinit = None, qmin = None, qmax = None, \
        pdata = None, x0 = None, \
        xmin = None, xmax = None, \
        wv = None, weps_u = None, \
        discretization_method = "collocation", \
        optimality_criterion = "A", **kwargs):

        r'''
        :raises: AttributeError, NotImplementedError

        :param system: system considered for parameter estimation, specified
                       using the :class:`casiopeia.system.System` class
        :type system: casiopeia.system.System

        :param time_points: time points :math:`t_\text{N} \in \mathbb{R}^\text{N}`
                   used to discretize the continuous time problem. Controls
                   will be applied at the first :math:`N-1` time points,
                   while measurements take place at all :math:`N` time points.
        :type time_points: numpy.ndarray, casadi.DMatrix, list

        :param umin: optional, lower bounds of the time-varying controls
                   :math:`u_\text{min} \in \mathbb{R}^{\text{n}_\text{u}}`;
                   if no values are given, :math:`-\infty` will be used
        :type umin: numpy.ndarray, casadi.DMatrix

        :param umax: optional, upper bounds of the time-varying controls
                   :math:`u_\text{max} \in \mathbb{R}^{\text{n}_\text{u}}`;
                   if no values are given, :math:`\infty` will be used
        :type umax: numpy.ndarray, casadi.DMatrix

        :param uinit: optional, initial guess for the values of the time-varying controls
                   :math:`u_\text{N} \in \mathbb{R}^{\text{n}_\text{u} \times \text{N}-1}`
                   that (might) change at the switching time points;
                   if no values are given, 0 will be used; note that a poorly
                   or wrongly chosen initial guess can cause the optimization
                   to fail, and note that the
                   second dimension of :math:`u_N` is :math:`N-1` and not
                   :math:`N`, since there is no control value applied at the
                   last time point
        :type uinit: numpy.ndarray, casadi.DMatrix

        :param qmin: optional, lower bounds of the time-constant controls
                   :math:`q_\text{min} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, :math:`-\infty` will be used
        :type qmin: numpy.ndarray, casadi.DMatrix

        :param qmax: optional, upper bounds of the time-constant controls
                   :math:`q_\text{max} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, :math:`\infty` will be used
        :type qmax: numpy.ndarray, casadi.DMatrix

        :param qinit: optional, initial guess for the optimal values of the
                   time-constant controls
                   :math:`q_\text{init} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, 0 will be used; note that a poorly
                   or wrongly chosen initial guess can cause the optimization
                   to fail
        :type qinit: numpy.ndarray, casadi.DMatrix

        :param pdata: values of the time-constant parameters 
                      :math:`p \in \mathbb{R}^{\text{n}_\text{p}}`
        :type pdata: numpy.ndarray, casadi.DMatrix

        :param x0: state values :math:`x_0 \in \mathbb{R}^{\text{n}_\text{x}}`
                   at the first time point :math:`t_0`
        :type x0: numpy.ndarray, casadi.DMatrix, list

        :param xmin: optional, lower bounds of the states
                      :math:`x_\text{min} \in \mathbb{R}^{\text{n}_\text{x}}`;
                      if no value is given, :math:`-\infty` will be used
        :type xmin: numpy.ndarray, casadi.DMatrix

        :param xmax: optional, upper bounds of the states
                      :math:`x_\text{max} \in \mathbb{R}^{\text{n}_\text{x}}`;
                      if no value is given, :math:`\infty` will be used
        :type xmax: numpy.ndarray, casadi.DMatrix 

        :param wv: weightings for the measurements
                   :math:`w_\text{v} \in \mathbb{R}^{\text{n}_\text{y} \times \text{N}}`
        :type wv: numpy.ndarray, casadi.DMatrix

        :param weps_u: weightings for the input errors
                   :math:`w_{\epsilon_\text{u}} \in \mathbb{R}^{\text{n}_{\epsilon_\text{u}}}`
                   (only necessary
                   if input errors are used within ``system``)
        :type weps_u: numpy.ndarray, casadi.DMatrix    

        :param discretization_method: optional, the method that shall be used for
                                      discretization of the continuous time
                                      problem w. r. t. the time points given 
                                      in :math:`t_\text{N}`; possible values are
                                      "collocation" (default) and
                                      "multiple_shooting"
        :type discretization_method: str

        :param optimality_criterion: optional, the information function
                                    :math:`I_\text{X}(\cdot)` to be used on the 
                                    covariance matrix, possible values are
                                    `A` (default) and `D`, while

                                    .. math ::

                                        \begin{aligned}
                                          I_\text{A}(\Sigma_\text{p}) & = \frac{1}{n_\text{p}} \text{Tr}(\Sigma_\text{p}),\\
                                          I_\text{D}(\Sigma_\text{p}) & = \begin{vmatrix} \Sigma_\text{p} \end{vmatrix} ^{\frac{1}{n_\text{p}}},
                                        \end{aligned}

                                    for further information see e. g. [#f1]_

        :type optimality_criterion: str

        Depending on the discretization method specified in
        `discretization_method`, the following parameters can be used
        for further specification:

        :param collocation_scheme: optional, scheme used for setting up the
                                   collocation polynomials,
                                   possible values are `radau` (default)
                                   and `legendre`
        :type collocation_scheme: str

        :param number_of_collocation_points: optional, order of collocation
                                             polynomials
                                             :math:`d \in \mathbb{Z}` (default
                                             values is 3)
        :type number_of_collocation_points: int


        :param integrator: optional, integrator to be used with multiple shooting.
                           See the CasADi documentation for a list of
                           all available integrators. As a default, `cvodes`
                           is used.
        :type integrator: str

        :param integrator_options: optional, options to be passed to the CasADi
                                   integrator used with multiple shooting
                                   (see the CasADi documentation for a list of
                                   all possible options)
        :type integrator_options: dict

        You do not need to specify initial guesses for the estimated states,
        since these are obtained with a system simulation using the initial
        states and the provided initial guesses for the controls.

        The resulting optimization problem has the following form:

        .. math::

            \begin{aligned}
                \text{arg}\,\underset{u, q, x}{\text{min}} & & I(\Sigma_{\text{p}}(x, u, q; p)) &\\
                \text{subject to:} & & g(x, u, q; p) & = 0\\
                & & u_\text{min} \leq u_\text{k} & \leq u_\text{max} \hspace{1cm} k = 1, \dots, N-1\\
                & & x_\text{min} \leq x_\text{k}  & \leq x_\text{max} \hspace{1cm} k = 1, \dots, N\\
                & & x_1 \leq x(t_1) & \leq x_1
            \end{aligned}

        where :math:`\Sigma_p = \text{Cov}(p)` and :math:`g(\cdot)` contains the
        discretized system dynamics
        according to the specified discretization method. If the system is
        non-dynamic, it only contains the user-provided equality constraints.

        .. rubric:: References

        .. [#f1] |linkf1|_
        
        .. _linkf1: http://ginger.iwr.uni-heidelberg.de/vplan/images/5/54/Koerkel2002.pdf

        .. |linkf1| replace:: *Körkel, Stefan: Numerische Methoden für Optimale Versuchsplanungsprobleme bei nichtlinearen DAE-Modellen, PhD Thesis, Heidelberg university, 2002, pages 74/75.*

        '''

        intro()

        self._discretize_system( \
            system, time_points, discretization_method, **kwargs)

        self._set_parameter_guess(pdata)

        self._apply_parameters_to_discretization()

        self._set_optimization_variables()

        self._set_optimization_variables_initials(qinit, x0, uinit)

        self._set_optimization_variables_lower_bounds(umin, qmin, xmin, x0)

        self._set_optimization_variables_upper_bounds(umax, qmax, xmax, x0)

        self._set_measurement_data()

        self._set_weightings(wv, weps_u)

        self._set_measurement_deviations()

        self._set_cov_matrix_derivative_directions()

        self._setup_constraints()

        self._setup_gauss_newton_lagrangian_hessian()

        self._setup_covariance_matrix()

        self._setup_covariance_matrix_for_evaluation()

        self._set_optimiality_criterion(optimality_criterion)

        self._setup_objective()

        self._apply_parameters_to_objective()

        self._setup_nlp()


    def _print_experimental_properties(self, covariance_matrix):

        np.set_printoptions(linewidth = 200, \
            formatter={'float': lambda x: format(x, ' 10.8e')})

        print("\nParameters p_i:")

        for k, pk in enumerate(self._pdata):

            print("    p_{0:<3} = {1} +/- {2}".format( \
                 k, pk, ci.sqrt(abs(ci.diag(covariance_matrix)[k]))))

        print("\nCovariance matrix for this setup:")

        print(np.atleast_2d(covariance_matrix))


    def _compute_initial_covariance_matrix(self):

        covariance_matrix_initial_input = ci.veccat([ \

                self._pdata,
                self._optimization_variables_initials,
                np.zeros(self._discretization.optimization_variables["EPS_U"].shape)

            ])


        self._covariance_matrix_initial = self._covariance_matrix_fcn( \
            [covariance_matrix_initial_input])[0]



    def _compute_optimized_covariance_matrix(self):

        covariance_matrix_optimized_input = ci.veccat([ \

                self._pdata,
                self.design_results["x"],
                np.zeros(self._discretization.optimization_variables["EPS_U"].shape)

            ])

        self._covariance_matrix_optimized = self._covariance_matrix_fcn( \
            [covariance_matrix_optimized_input])[0]


    def plot_confidence_ellipsoids(self, properties = "initial"):

        r'''
        :param properties: Set whether the experimental properties for the
                           initial setup ("initial", default), the optimized setup
                           ("optimized") or for both setups ("all") shall be
                           plotted. In the latter case, both ellipsoids for one
                           pair of parameters will be displayed within one plot.
        :type properties: str

        Plot confidence ellipsoids for all parameter pairs. 
        Since the number of plots is possibly big, all plots will be saved
        within a folder *confidence_ellipsoids_scriptname* in your current
        working directory rather than being displayed directly.

        '''

        self._plot_confidence_ellipsoids(pdata = self._pdata, \
            properties = properties)
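
A minimal set-up sketch based only on the constructor signature documented above; system, pdata and x0 are assumed to come from an already defined casiopeia.system.System model and are not shown here.

# Hypothetical DoE set-up; control dimensions follow the docstring (N-1 control points).
import numpy as np
import casiopeia as cp

time_points = np.linspace(0.0, 10.0, 11)

doe = cp.doe.DoE(system = system,        # casiopeia.system.System instance (assumed)
    time_points = time_points,
    uinit = np.zeros((system.nu, time_points.size - 1)),
    pdata = pdata,                       # current guess for the parameters (assumed)
    x0 = x0,                             # initial state values (assumed)
    discretization_method = "collocation",
    optimality_criterion = "A")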
Example #19
    '--motion-noise',
    dest='m_noise',
    required=False,
    type=float,
    help='The amount of motion noise to add to the particle filter.')
parser.add_argument(
    '--ignore-regions',
    dest='ignore_regions',
    action='store_true',
    help='Set this flag to disable using the region probabilities.')
args = parser.parse_args()
config = get_pf_config(args.config_file)
building_map = BuildingMap(args.map_data)
# If this is a feed generator run, start the user simulator.
if args.make_feed:
    simulation = Simulation(building_map, args.feed)
    w = DisplayWindow(building_map, args.map_image, sim=simulation)
    w.start_make_feed()
# Otherwise, run the particle filter on an existing feed.
else:
    display_on = False if args.no_disp else True
    c_noise = args.c_noise if args.c_noise else 0
    m_noise = args.m_noise if args.m_noise else 0
    feed_processor = FeedProcessor(args.feed,
                                   args.loop_feed,
                                   c_noise,
                                   m_noise,
                                   ignore_regions=args.ignore_regions)
    pf = ParticleFilter(config, building_map, feed_processor)
    w = DisplayWindow(building_map, args.map_image, pf, display=display_on)
    w.start_particle_filter()
Example #20
File: run2.py Project: baubie/DTNSpecies
from sim import Simulation
import network
import neuron
import netshow as ns
import progress

# Create our network
networks = {}
networks["C"] = network.DTN_Coincidence()
networks["AC"] = network.DTN_AntiCoincidence()


# Initialize the simulation with the network
s = {}
for net in networks:
    s[net] = Simulation(networks[net])
    s[net].verbose = False
    s[net].sim_time = 100
    s[net].dt = 0.025


# Run the simulations
stims = [i for i in range(1,31,1)]
repeats = 15
param = [(i*2)+10 for i in range(11)]

total = len(stims)*len(param)*repeats
print "Running %d simulations..." % total
count = 0

for a in param: 
Example #21
number_of_calculated_models = 0
total_number_of_models = total_number_of_models * 60

for n in number_of_samples:
    number_of_samples_text = str(n)
    for seed in seeds:
        seed_text = str(seed)
        for length in sample_length_values:
            sample_length_text = str(length)
            rand = random.Random()
            for wd, mc in window_device.items():
                w = wd[0]
                d = wd[1]
                sample_gen = SampleGen(mc, rand)
                simulator = Simulation(sample_gen)
                # time used to generate all samples
                rand.seed(seed)
                sample_generation_time = 0
                for i in range(n):
                    start = time.time()
                    sample_gen.sample(length)
                    end = time.time()
                    sample_generation_time += end - start
                rand.seed(seed)
                # evaluate the MC given the current combination of parameters
                start = time.time()
                # performance indicator 1 - prediction accuracy
                accuracy = simulator.runSimulation(mc, n, length, is_dpm)
                end = time.time()
                # performance indicator 2 - execution time
Example #22
File: test.py Project: iGEM-QSF/SimCircus
from sim import Simulation

sirkus = Simulation()
sirkus.testY()
Example #23
def sim(nt, dri):
    ip = InputParameters(nt, dri)
    s = Simulation(ip)
    s.run()
Example #24
#! /usr/bin/env python3

from sim import Simulation

if __name__ == "__main__":

    sim = Simulation(ants=5, x=100, y=100, num_rivers=20)

    sim.run()
Example #25
File: doe.py Project: MannyKayy/casiopeia
class DoE(DoEProblem):

    '''The class :class:`casiopeia.doe.DoE` is used to set up
    Design-of-Experiments-problems for systems defined with the
    :class:`casiopeia.system.System` class.

    The aim of the experimental design optimization is to identify a set of
    controls that can be used for the generation of measurement data which
    allows for a better estimation of the unknown parameters of a system.

    To achieve this, an information function on the covariance matrix of the
    estimated parameters is minimized. The values of the estimated parameters,
    which are usually only an initial guess, are not changed during the
    optimization.

    Optimum experimental design and parameter estimation methods can be used
    interchangeably until a desired accuracy of the parameters has been
    achieved.
    '''

    @property
    def optimized_controls(self):

        return self.design_results["x"][ \
            :(self._discretization.number_of_intervals * \
                self._discretization.system.nu)]


    def _discretize_system(self, system, time_points, discretization_method, \
        **kwargs):

        if system.nx == 0:

            self._discretization = NoDiscretization(system, time_points)

        elif system.nx != 0:

            if discretization_method == "collocation":

                self._discretization = ODECollocation( \
                    system, time_points, **kwargs)

            elif discretization_method == "multiple_shooting":

                self._discretization = ODEMultipleShooting( \
                    system, time_points, **kwargs)

            else:

                raise NotImplementedError('''
Unknown discretization method: {0}.
Possible values are "collocation" and "multiple_shooting".
'''.format(str(discretization_method)))
      

    def _set_parameter_guess(self, pdata):

        self._pdata = inputchecks.check_parameter_data(pdata, \
            self._discretization.system.np)


    def _apply_parameters_to_equality_constraints(self):

        optimization_variables_for_equality_constraints = ci.veccat([ \

                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"], 
                self._discretization.optimization_variables["EPS_U"], 
                self._discretization.optimization_variables["P"], 

            ])

        optimization_variables_parameters_applied = ci.veccat([ \

                self._discretization.optimization_variables["U"], 
                self._discretization.optimization_variables["Q"], 
                self._discretization.optimization_variables["X"], 
                ci.mx(*self._discretization.optimization_variables["EPS_U"].shape), 
                self._pdata, 

            ])

        equality_constraints_fcn = ci.mx_function( \
            "equality_constraints_fcn", \
            [optimization_variables_for_equality_constraints], \
            [self._discretization.equality_constraints])

        self._equality_constraints_parameters_applied = \
            equality_constraints_fcn(optimization_variables_parameters_applied)


    def _apply_parameters_to_discretization(self):

        self._apply_parameters_to_equality_constraints()


    def _set_optimization_variables(self):

        self._optimization_variables = ci.veccat([ \

                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],

            ])


    def _set_optimization_variables_initials(self, qinit, x0, uinit):

        self.simulation = Simulation(self._discretization.system, \
            self._pdata, qinit)
        self.simulation.run_system_simulation(x0, \
            self._discretization.time_points, uinit, print_status = False)
        xinit = self.simulation.simulation_results

        repretitions_xinit = \
            self._discretization.optimization_variables["X"][:,:-1].shape[1] / \
                self._discretization.number_of_intervals
        
        Xinit = ci.repmat(xinit[:, :-1], repretitions_xinit, 1)

        Xinit = ci.horzcat([ \

            Xinit.reshape((self._discretization.system.nx, \
                Xinit.numel() / self._discretization.system.nx)),
            xinit[:, -1],

            ])

        uinit = inputchecks.check_controls_data(uinit, \
            self._discretization.system.nu, \
            self._discretization.number_of_intervals)
        Uinit = uinit

        qinit = inputchecks.check_constant_controls_data(qinit, \
            self._discretization.system.nq)
        Qinit = qinit

        self._optimization_variables_initials = ci.veccat([ \

                Uinit,
                Qinit,
                Xinit,

            ])


    def _set_optimization_variables_lower_bounds(self, umin, qmin, xmin, x0):

        umin_user_provided = umin

        umin = inputchecks.check_controls_data(umin, \
            self._discretization.system.nu, 1)

        if umin_user_provided is None:

            umin = -np.inf * np.ones(umin.shape)

        Umin = ci.repmat(umin, 1, \
            self._discretization.optimization_variables["U"].shape[1])


        qmin_user_provided = qmin

        qmin = inputchecks.check_constant_controls_data(qmin, \
            self._discretization.system.nq)

        if qmin_user_provided is None:

            qmin = -np.inf * np.ones(qmin.shape)

        Qmin = qmin


        xmin_user_provided = xmin

        xmin = inputchecks.check_states_data(xmin, \
            self._discretization.system.nx, 0)

        if xmin_user_provided is None:

            xmin = -np.inf * np.ones(xmin.shape)

        Xmin = ci.repmat(xmin, 1, \
            self._discretization.optimization_variables["X"].shape[1])

        Xmin[:,0] = x0


        self._optimization_variables_lower_bounds = ci.veccat([ \

                Umin,
                Qmin,
                Xmin,

            ])


    def _set_optimization_variables_upper_bounds(self, umax, qmax, xmax, x0):

        umax_user_provided = umax

        umax = inputchecks.check_controls_data(umax, \
            self._discretization.system.nu, 1)

        if umax_user_provided is None:

            umax = np.inf * np.ones(umax.shape)

        Umax = ci.repmat(umax, 1, \
            self._discretization.optimization_variables["U"].shape[1])


        qmax_user_provided = qmax

        qmax = inputchecks.check_constant_controls_data(qmax, \
            self._discretization.system.nq)

        if qmax_user_provided is None:

            qmax = np.inf * np.ones(qmax.shape)

        Qmax = qmax


        xmax_user_provided = xmax

        xmax = inputchecks.check_states_data(xmax, \
            self._discretization.system.nx, 0)

        if xmax_user_provided is None:

            xmax = np.inf * np.ones(xmax.shape)

        Xmax = ci.repmat(xmax, 1, \
            self._discretization.optimization_variables["X"].shape[1])

        Xmax[:,0] = x0


        self._optimization_variables_upper_bounds = ci.veccat([ \

                Umax,
                Xmax,

            ])


    def _set_measurement_data(self):

        # The DOE problem does not depend on actual measurement values,
        # the measurement deviations are only needed to set up the objective;
        # therefore, dummy-values for the measurements can be used
        # (see issue #7 for further information)

        measurement_data = np.zeros((self._discretization.system.nphi, \
            self._discretization.number_of_intervals + 1))

        self._measurement_data_vectorized = ci.vec(measurement_data)


    def _set_weightings(self, wv, weps_u):

        input_error_weightings = \
            inputchecks.check_input_error_weightings(weps_u, \
            self._discretization.system.neps_u, 
            self._discretization.number_of_intervals)

        measurement_weightings = \
            inputchecks.check_measurement_weightings(wv, \
            self._discretization.system.nphi, \
            self._discretization.number_of_intervals + 1)

        self._weightings_vectorized = ci.veccat([ \

            input_error_weightings, 
            measurement_weightings,

            ])


    def _set_measurement_deviations(self):

        self._measurement_deviations = ci.vertcat([ \

                ci.vec(self._discretization.measurements) - \
                self._measurement_data_vectorized + \
                ci.vec(self._discretization.optimization_variables["V"])

            ])


    def _setup_constraints(self):

        self._constraints = ci.vertcat([ \

                self._measurement_deviations,
                self._discretization.equality_constraints,

            ])


    def _set_cov_matrix_derivative_directions(self):

        # These correspond to the optimization variables of the parameter
        # estimation problem; the evaluation of the covariance matrix, though,
        # does not depend on the actual values of V, EPS_E and EPS_U, and with
        # this, the DoE problem does not

        self._cov_matrix_derivative_directions = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["V"],
                self._discretization.optimization_variables["EPS_U"],

            ])


    def _setup_gauss_newton_lagrangian_hessian(self):

        gauss_newton_lagrangian_hessian_diag = ci.vertcat([ \
            ci.mx(self._cov_matrix_derivative_directions.shape[0] - \
                self._weightings_vectorized.shape[0], 1), \
            self._weightings_vectorized])

        self._gauss_newton_lagrangian_hessian = ci.diag( \
            gauss_newton_lagrangian_hessian_diag)


    def _setup_covariance_matrix_for_evaluation(self):

        covariance_matrix_free_variables = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["EPS_U"],

            ])

        self._covariance_matrix_fcn = ci.mx_function("covariance_matrix_fcn", \
            [covariance_matrix_free_variables], \
            [self._covariance_matrix.covariance_matrix])


    def _apply_parameters_to_objective(self):

        # As mentioned above, the objective does not depend on the actual
        # values of V, but on the values of P and EPS_U, while
        # P is fed from pdata, and EPS_U is supposed to be 0

        objective_free_variables = ci.veccat([ \

                self._discretization.optimization_variables["P"],
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                self._discretization.optimization_variables["EPS_U"],

            ])

        objective_free_variables_parameters_applied = ci.veccat([ \

                self._pdata,
                self._discretization.optimization_variables["U"],
                self._discretization.optimization_variables["Q"],
                self._discretization.optimization_variables["X"],
                ci.mx(*self._discretization.optimization_variables["EPS_U"].shape),

            ])

        objective_fcn = ci.mx_function("objective_fcn", \
            [objective_free_variables], [self._objective_parameters_free])

        self._objective = objective_fcn( \
            objective_free_variables_parameters_applied)


    def __init__(self, system, time_points, \
        uinit = None, umin = None, umax = None, \
        qinit = None, qmin = None, qmax = None, \
        pdata = None, x0 = None, \
        xmin = None, xmax = None, \
        wv = None, weps_u = None, \
        discretization_method = "collocation", \
        optimality_criterion = "A", **kwargs):

        r'''
        :raises: AttributeError, NotImplementedError

        :param system: system considered for parameter estimation, specified
                       using the :class:`casiopeia.system.System` class
        :type system: casiopeia.system.System

        :param time_points: time points :math:`t_\text{N} \in \mathbb{R}^\text{N}`
                   used to discretize the continuous time problem. Controls
                   will be applied at the first :math:`N-1` time points,
                   while measurements take place at all :math:`N` time points.
        :type time_points: numpy.ndarray, casadi.DMatrix, list

        :param umin: optional, lower bounds of the time-varying controls
                   :math:`u_\text{min} \in \mathbb{R}^{\text{n}_\text{u}}`;
                   if no values are given, :math:`-\infty` will be used
        :type umin: numpy.ndarray, casadi.DMatrix

        :param umax: optional, upper bounds of the time-varying controls
                   :math:`u_\text{max} \in \mathbb{R}^{\text{n}_\text{u}}`;
                   if no values are given, :math:`\infty` will be used
        :type umax: numpy.ndarray, casadi.DMatrix

        :param uinit: optional, initial guess for the values of the time-varying controls
                   :math:`u_\text{N} \in \mathbb{R}^{\text{n}_\text{u} \times \text{N}-1}`
                   that (might) change at the switching time points;
                   if no values are given, 0 will be used; note that a poorly
                   or wrongly chosen initial guess can cause the optimization
                   to fail, and note that the
                   second dimension of :math:`u_N` is :math:`N-1` and not
                   :math:`N`, since there is no control value applied at the
                   last time point
        :type uinit: numpy.ndarray, casadi.DMatrix

        :param qmin: optional, lower bounds of the time-constant controls
                   :math:`q_\text{min} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, :math:`-\infty` will be used
        :type qmin: numpy.ndarray, casadi.DMatrix

        :param qmax: optional, upper bounds of the time-constant controls
                   :math:`q_\text{max} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, :math:`\infty` will be used
        :type qmax: numpy.ndarray, casadi.DMatrix

        :param qinit: optional, initial guess for the optimal values of the
                   time-constant controls
                   :math:`q_\text{init} \in \mathbb{R}^{\text{n}_\text{q}}`;
                   if no values are given, 0 will be used; note that a poorly
                   or wrongly chosen initial guess can cause the optimization
                   to fail
        :type qinit: numpy.ndarray, casadi.DMatrix

        :param pdata: values of the time-constant parameters 
                      :math:`p \in \mathbb{R}^{\text{n}_\text{p}}`
        :type pdata: numpy.ndarray, casadi.DMatrix

        :param x0: state values :math:`x_0 \in \mathbb{R}^{\text{n}_\text{x}}`
                   at the first time point :math:`t_0`
        :type x0: numpy.ndarray, casadi.DMatrix, list

        :param xmin: optional, lower bounds of the states
                      :math:`x_\text{min} \in \mathbb{R}^{\text{n}_\text{x}}`;
                      if no value is given, :math:`-\infty` will be used
        :type xmin: numpy.ndarray, casadi.DMatrix

        :param xmax: optional, upper bounds of the states
                      :math:`x_\text{max} \in \mathbb{R}^{\text{n}_\text{x}}`;
                      if no value is given, :math:`\infty` will be used
        :type xmax: numpy.ndarray, casadi.DMatrix 

        :param wv: weightings for the measurements
                   :math:`w_\text{v} \in \mathbb{R}^{\text{n}_\text{y} \times \text{N}}`
        :type wv: numpy.ndarray, casadi.DMatrix

        :param weps_u: weightings for the input errors
                   :math:`w_{\epsilon_\text{u}} \in \mathbb{R}^{\text{n}_{\epsilon_\text{u}}}`
                   (only necessary
                   if input errors are used within ``system``)
        :type weps_u: numpy.ndarray, casadi.DMatrix    

        :param discretization_method: optional, the method that shall be used for
                                      discretization of the continuous time
                                      problem w. r. t. the time points given 
                                      in :math:`t_\text{N}`; possible values are
                                      "collocation" (default) and
                                      "multiple_shooting"
        :type discretization_method: str

        :param optimality_criterion: optional, the information function
                                    :math:`I_\text{X}(\cdot)` to be used on the 
                                    covariance matrix, possible values are
                                    `A` (default) and `D`, while

                                    .. math ::

                                        \begin{aligned}
                                          I_\text{A}(\Sigma_\text{p}) & = \frac{1}{n_\text{p}} \text{Tr}(\Sigma_\text{p}),\\
                                          I_\text{D}(\Sigma_\text{p}) & = \begin{vmatrix} \Sigma_\text{p} \end{vmatrix} ^{\frac{1}{n_\text{p}}},
                                        \end{aligned}

                                    for further information see e. g. [#f1]_

        :type optimality_criterion: str

        Depending on the discretization method specified in
        `discretization_method`, the following parameters can be used
        for further specification:

        :param collocation_scheme: optional, scheme used for setting up the
                                   collocation polynomials,
                                   possible values are `radau` (default)
                                   and `legendre`
        :type collocation_scheme: str

        :param number_of_collocation_points: optional, order of collocation
                                             polynomials
                                             :math:`d \in \mathbb{Z}` (default
                                             value is 3)
        :type number_of_collocation_points: int


        :param integrator: optional, integrator to be used with multiple shooting.
                           See the CasADi documentation for a list of
                           all available integrators. As a default, `cvodes`
                           is used.
        :type integrator: str

        :param integrator_options: optional, options to be passed to the CasADi
                                   integrator used with multiple shooting
                                   (see the CasADi documentation for a list of
                                   all possible options)
        :type integrator_options: dict

        You do not need to specify initial guesses for the estimated states,
        since these are obtained with a system simulation using the initial
        states and the provided initial guesses for the controls.

        The resulting optimization problem has the following form:

        .. math::

            \begin{aligned}
                \text{arg}\,\underset{u, q, x}{\text{min}} & & I(\Sigma_{\text{p}}(x, u, q; p)) &\\
                \text{subject to:} & & g(x, u, q; p) & = 0\\
                & & u_\text{min} \leq u_\text{k} & \leq u_\text{max} \hspace{1cm} k = 1, \dots, N-1\\
                & & x_\text{min} \leq x_\text{k}  & \leq x_\text{max} \hspace{1cm} k = 1, \dots, N\\
                & & x_1 \leq x(t_1) & \leq x_1
            \end{aligned}

        where :math:`\Sigma_p = \text{Cov}(p)` and :math:`g(\cdot)` contains the
        discretized system dynamics
        according to the specified discretization method. If the system is
        non-dynamic, it only contains the user-provided equality constraints.

        .. rubric:: References

        .. [#f1] |linkf1|_
        
        .. _linkf1: http://ginger.iwr.uni-heidelberg.de/vplan/images/5/54/Koerkel2002.pdf

        .. |linkf1| replace:: *Körkel, Stefan: Numerische Methoden für Optimale Versuchsplanungsprobleme bei nichtlinearen DAE-Modellen, PhD Thesis, Heidelberg university, 2002, pages 74/75.*

        '''

        intro()

        self._discretize_system( \
            system, time_points, discretization_method, **kwargs)

        self._set_parameter_guess(pdata)

        self._apply_parameters_to_discretization()

        self._set_optimization_variables()

        self._set_optimization_variables_initials(qinit, x0, uinit)

        self._set_optimization_variables_lower_bounds(umin, qmin, xmin, x0)

        self._set_optimization_variables_upper_bounds(umax, qmax, xmax, x0)

        self._set_measurement_data()

        self._set_weightings(wv, weps_u)

        self._set_measurement_deviations()

        self._set_cov_matrix_derivative_directions()

        self._setup_constraints()

        self._setup_gauss_newton_lagrangian_hessian()

        self._setup_covariance_matrix()

        self._setup_covariance_matrix_for_evaluation()

        self._set_optimiality_criterion(optimality_criterion)

        self._setup_objective()

        self._apply_parameters_to_objective()

        self._setup_nlp()


    def _print_experimental_properties(self, covariance_matrix):

        np.set_printoptions(linewidth = 200, \
            formatter={'float': lambda x: format(x, ' 10.8e')})

        print("\nParameters p_i:")

        for k, pk in enumerate(self._pdata):

            print("    p_{0:<3} = {1} +/- {2}".format( \
                 k, pk, ci.sqrt(abs(ci.diag(covariance_matrix)[k]))))

        print("\nCovariance matrix for this setup:")

        print(np.atleast_2d(covariance_matrix))


    def _compute_initial_covariance_matrix(self):

        covariance_matrix_initial_input = ci.veccat([ \

                self._pdata,
                self._optimization_variables_initials,
                np.zeros(self._discretization.optimization_variables["EPS_U"].shape)

            ])


        self._covariance_matrix_initial = self._covariance_matrix_fcn( \
            covariance_matrix_initial_input)



    def _compute_optimized_covariance_matrix(self):

        covariance_matrix_optimized_input = ci.veccat([ \

                self._pdata,
                self.design_results["x"],
                np.zeros(self._discretization.optimization_variables["EPS_U"].shape)

            ])

        self._covariance_matrix_optimized = self._covariance_matrix_fcn( \
            covariance_matrix_optimized_input)


    def plot_confidence_ellipsoids(self, properties = "initial"):

        r'''
        :param properties: Set whether the experimental properties for the
                           initial setup ("initial", default), the optimized setup
                           ("optimized") or for both setups ("all") shall be
                           plotted. In the latter case, both ellipsoids for one
                           pair of parameters will be displayed within one plot.
        :type properties: str

        Plot confidence ellipsoids for all parameter pairs. 
        Since the number of plots can be large, all plots will be saved
        within a folder *confidence_ellipsoids_scriptname* in your current
        working directory rather than being displayed directly.

        '''

        self._plot_confidence_ellipsoids(pdata = self._pdata, \
            properties = properties)
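
The docstring above only states the A- and D-optimality criteria as formulas. The following is a minimal, stand-alone sketch (not part of casiopeia) of what those two information functions compute, given an already evaluated parameter covariance matrix; the numbers are made up for illustration.

import numpy as np

def a_criterion(cov):
    # I_A(Sigma_p) = Tr(Sigma_p) / n_p
    return np.trace(cov) / cov.shape[0]

def d_criterion(cov):
    # I_D(Sigma_p) = det(Sigma_p) ** (1 / n_p)
    return np.linalg.det(cov) ** (1.0 / cov.shape[0])

# Covariance matrix of two hypothetical parameters
cov = np.array([[0.04, 0.01],
                [0.01, 0.09]])

print(a_criterion(cov))  # 0.065
print(d_criterion(cov))  # ~0.0592

Minimizing one of these criteria over the experimental controls is exactly the optimization problem stated in the constructor docstring.
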
Example #26
0
def my_calculation(arguments):
    """
    This function returns the assets after death for the given arguments.
    """
    args, rate_of_return, years_to_wait = arguments

    #
    # Calculate the most efficient Roth conversion amount.
    #
    most_assets = 0
    roth_conversion_amount = 0
    best_roth_conversion_amount = 0

    while True:
        simulation = Simulation(
            args.starting_balance_hsa,
            args.starting_balance_taxable,
            args.starting_balance_trad_401k,
            args.starting_balance_trad_ira,
            args.starting_balance_roth_401k,
            args.starting_balance_roth_ira,
            rate_of_return,
            years_to_wait,
            args.current_age,
            args.age_of_retirement,
            args.age_to_start_rmds,
            args.age_of_death,
            roth_conversion_amount,  # this is our variable
            args.income,
            args.yearly_income_raise,
            args.max_income,
            args.age_of_marriage,
            args.spending,
            args.contribution_limit_hsa,
            args.contribution_catch_up_amount_hsa,
            args.contribution_catch_up_age_hsa,
            args.contribution_limit_401k,
            args.contribution_limit_401k_total,
            args.contribution_catch_up_amount_401k,
            args.contribution_catch_up_age_401k,
            args.contribution_limit_ira,
            args.contribution_catch_up_amount_ira,
            args.contribution_catch_up_age_ira,
            args.do_mega_backdoor_roth,
            args.work_state,
            args.retirement_state,
            args.add_dependent,
            args.public_safety_employee,
            args.employer_match_401k,
            args.max_contribution_percentage_401k,
            args.employer_contribution_hsa)
        simulation.simulate()

        if round(simulation.get_total_assets_after_death(), 2) >= round(
                most_assets, 2):
            best_roth_conversion_amount = roth_conversion_amount
            most_assets = simulation.get_total_assets_after_death()

        traditional_money = (simulation.accounts.trad_401k.get_value() +
                             simulation.accounts.trad_ira.get_value())
        if round(traditional_money, 2) == 0:
            break
        roth_conversion_amount += args.roth_conversion_unit

    simulation = Simulation(
        args.starting_balance_hsa, args.starting_balance_taxable,
        args.starting_balance_trad_401k, args.starting_balance_trad_ira,
        args.starting_balance_roth_401k, args.starting_balance_roth_ira,
        rate_of_return, years_to_wait, args.current_age,
        args.age_of_retirement, args.age_to_start_rmds, args.age_of_death,
        best_roth_conversion_amount, args.income, args.yearly_income_raise,
        args.max_income, args.age_of_marriage, args.spending,
        args.contribution_limit_hsa, args.contribution_catch_up_amount_hsa,
        args.contribution_catch_up_age_hsa, args.contribution_limit_401k,
        args.contribution_limit_401k_total,
        args.contribution_catch_up_amount_401k,
        args.contribution_catch_up_age_401k, args.contribution_limit_ira,
        args.contribution_catch_up_amount_ira,
        args.contribution_catch_up_age_ira, args.do_mega_backdoor_roth,
        args.work_state, args.retirement_state, args.add_dependent,
        args.public_safety_employee, args.employer_match_401k,
        args.max_contribution_percentage_401k, args.employer_contribution_hsa)
    simulation.simulate()
    return simulation.get_total_assets_after_death()
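
Because my_calculation() takes a single tuple argument, it drops straight into a process pool. The sketch below is a hypothetical driver (the rate-of-return grid, the waiting-period grid, and the sweep_assets_after_death name are illustrative, not from the original project); args is the same parsed argument namespace the function already expects.

from multiprocessing import Pool

def sweep_assets_after_death(args):
    # Hypothetical sweep grids; adjust to taste.
    rates_of_return = [0.04, 0.05, 0.06]
    years_to_wait = range(0, 11)
    work = [(args, rate, years)
            for rate in rates_of_return
            for years in years_to_wait]
    with Pool() as pool:
        results = pool.map(my_calculation, work)
    # Pair each (rate, years) combination with its assets after death.
    return [((rate, years), assets)
            for (_, rate, years), assets in zip(work, results)]
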
Example #27
0
def startSim(self):
    self.sim[0] = Simulation(self.population, self.screen, self.clock)
Example #28
0
File: mice.py Project: KebertXela87/mvu
#!/usr/bin/env python

from parse import Parser
from sim import Simulation

#----- Main

parser = Parser("sampledata.csv")
simulation = Simulation()
simulation.setUp(parser.getData())
print("Running ...")
simulation.runFullSim()
print(simulation.getAllPaths())
print(simulation.getAllHeatData())
'''
print('This is a test!')
parser = Parser("sampledata.csv")
simulation = Simulation()
simulation.setUp(parser.getData())
done = False
while not done:
    print("(25) to see all paths thus far. (35) to see all heat data. (99) to quit.")
    print("-xx to completely run sim, in increments of xx.")
    print("Total time so far: " + str(simulation.getTotalTime()))
    inputStr = str(input("How long to move? "))
    if(inputStr[0] == '-'):
        simulation.oneStep(int(inputStr[1:]))

    if (int(inputStr) == 99):
        done = True
    elif (int(inputStr) == 25):
Example #29
0
def run_sim(model):
    odefunction = lambda t, x: model.ode(t, x)
    sim_params = get_sim_parameters()
    sim = Simulation(sim_params)
    return sim.simulate(odefunction)
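
run_sim() only requires a model object exposing an ode(t, x) method that returns dx/dt, together with the Simulation and get_sim_parameters helpers imported elsewhere in this project. The model below is a hypothetical stand-in (simple exponential decay), not taken from the original source.

class DecayModel:
    """Exponential decay: dx/dt = -k * x."""

    def __init__(self, k=0.5):
        self.k = k

    def ode(self, t, x):
        return [-self.k * xi for xi in x]

# result = run_sim(DecayModel())  # assumes Simulation and get_sim_parameters are available
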