Example #1
def read_trj(trj_file):
    """
    return: Simulation
    parameters:
        trj_file: string | name of trj file
    """
    simulation = Simulation()
    with open(trj_file, 'r') as trj:

        while True:

            line = trj.readline()
            if not line:
                break
            lattice = Lattice()
            lattice.set_a(np.array(line.split(), dtype=float))
            lattice.set_b(np.array(trj.readline().split(), dtype=float))
            lattice.set_c(np.array(trj.readline().split(), dtype=float))

            configuration = Configuration(lattice=lattice)

            atom_types = trj.readline().split()
            atom_counts = np.array(trj.readline().split(), dtype=int)
            natom = np.sum(atom_counts)

            for i in xrange(natom):
                atom_record = trj.readline().split()
                atom_name = atom_record[0]
                atom_position = np.array(atom_record[1:], dtype=float)
                configuration.insert_atom(Atom(atom_name, atom_position))

            simulation.insert_configuration(configuration)

    return simulation
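
A hedged usage sketch for the parser above; the trajectory file name and the Simulation attribute accessed below are assumptions for illustration, not part of the original snippet:

# 'md.trj' is a placeholder path; read_trj() returns the populated Simulation.
sim = read_trj('md.trj')
# The attribute name below is assumed for illustration only.
print('read %d configurations' % len(sim.configurations))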
Example #2
def do_main(graph, teams, model):
  print "\n\nGraph:", graph, "  Teams:", teams

  # Create the adjacency list for the graph.
  adj_list = create_adj_list(graph)
  # Read in the node selection for each team.
  team_nodes = read_nodes(graph, adj_list.keys(), teams)

  # Run the simulation and output the run to file.
  simulation = Simulation(model, team_nodes, adj_list)
  (output, results) = simulation.run()
  output_filename = graph + "-" + str(time.time()) + ".txt"
  output_file = open(OUTPUT_FOLDER + output_filename, "w")
  output_file.write(str(json.dumps(output)))
  output_file.close()

  # Get the final results of teams to their nodes and update their points in
  # the database.
  scores = update_points(results)
  db.test.runs.insert({ \
    "teams": teams, \
    "scores": scores, \
    "graph": graph, \
    "file": output_filename \
  })
Example #3
def test_spinstell_spinstell_ampa():
    netdata = TraubFullNetData()
    sim = Simulation('spinstell_spinstell_synapse')
    spinstell_index = netdata.celltype.index('SpinyStellate')
    pre = SpinyStellate(SpinyStellate.prototype, sim.model.path + '/SpinyStellate1')
    spinstell = SpinyStellate(SpinyStellate.prototype, sim.model.path + '/SpinyStellate2')
    precomp = pre.comp[SpinyStellate.presyn]
    postcomp = spinstell.comp[5] # 5 is among the allowed post synaptic compartments in spiny stellate cell
    synchan = precomp.makeSynapse(postcomp, 
                                  name='ampa_from_SPINSTELL', 
                                  classname='SynChan', 
                                  Ek=0.0,
                                  Gbar=netdata.g_ampa_baseline[spinstell_index][spinstell_index],
                                  tau1=netdata.tau_ampa[spinstell_index][spinstell_index],
                                  tau2=netdata.tau_ampa[spinstell_index][spinstell_index],
                                  delay = synapse.SYNAPTIC_DELAY_DEFAULT
                                  )
    stim = pre.soma.insertPulseGen('stimulus', sim.model, firstLevel=1e-9, firstDelay=200e-3, firstWidth=2e-3)
    pre_soma_tab = pre.soma.insertRecorder('stim', 'Vm', sim.data)
    ss_soma_tab = spinstell.soma.insertRecorder('post_soma', 'Vm', sim.data)
    ss_dend_tab = postcomp.insertRecorder('post_dend', 'Vm', sim.data)
    sim.schedule()
    sim.run(1.0)
    pylab.plot(numpy.linspace(0, 1.0, len(pre_soma_tab)), pre_soma_tab, label='pre_soma')
    pylab.plot(numpy.linspace(0, 1.0, len(ss_soma_tab)), ss_soma_tab, label='ss_soma')
    pylab.plot(numpy.linspace(0, 1.0, len(ss_dend_tab)), ss_dend_tab, label='ss_dend')
    pylab.legend()
    pylab.show()
Example #4
def main(argv):
    R = 500

    try:
        opts, args = getopt.getopt(argv, "hr:", ["help", "reps="])
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage()
            sys.exit(2)
        if opt in ('-r', '--reps'):
            R = int(arg)

    # Instantiate ruler ideology parameters
    rid0 = float(np.random.beta(2, 2, 1))
    rid1 = float(np.random.beta(2, 2, 1))
    params0 = {'ideology': 1, 'quality': 0, 'seniority': 0}
    params1 = {'ideology': 1, 'quality': 0, 'seniority': 0}
    leonidasr = Ruler(rid0, params0)
    spartar = Army(3, 3, 4, 30, [2, 4], leonidasr)
    leonidasl = Ruler(rid1, params1)
    spartal = Army(3, 3, 4, 30, [4, 2], leonidasl)

    print('Replication: ruler0-params {}, \
                        ruler1-params {}'.
          format(rid0,
                 rid1))
    for oo in [True]:
        # print 'Inits: {}, Ordered: {}'.format(params, oo)
        population = Population().population
        sargs = {'R': R, 'method': 'none'}
        simp = Simulation(spartar, spartal, population, sargs)
        simp.run()
        simp.write()
Example #5
File: qmcpack.py Project: jyamu/qmc
    def __init__(self,**kwargs):
        if not 'sims' in kwargs:
            self.error('sims must be provided')
        #end if
        sims = kwargs['sims']
        self.sims = sims
        del kwargs['sims']
        files = set()
        for sim in sims:
            files = files | sim.files
        #end for
        kwargs['files'] = files

        inputs = []
        filenames = []
        for sim in sims:
            inputs.append(sim.input)
            filenames.append(sim.infile)
        #end for
        kwargs['input'] = BundledQmcpackInput(inputs=inputs,filenames=filenames)

        Simulation.__init__(self,**kwargs)
        deps = []
        for sim in sims:
            for dep in sim.dependencies:
                deps.append((dep.sim,'other'))
            #end for
        #end for
        self.depends(*deps)
Example #6
def explore(fpath):
    _, ext = splitext(fpath)
    ftype = 'data' if ext in ('.h5', '.hdf5') else 'simulation'
    print("Using %s file: '%s'" % (ftype, fpath))
    if ftype == 'data':
        globals_def, entities = entities_from_h5(fpath)
        data_source = H5Data(None, fpath)
        h5in, _, globals_data = data_source.load(globals_def, entities)
        h5out = None
        simulation = Simulation(globals_def, None, None, None, None,
                                entities.values(), None)
        period, entity_name = None, None
    else:
        simulation = Simulation.from_yaml(fpath)
        h5in, h5out, globals_data = simulation.load()
        period = simulation.start_period + simulation.periods - 1
        entity_name = simulation.default_entity
    entities = simulation.entities_map
    if entity_name is None and len(entities) == 1:
        entity_name = entities.keys()[0]
    if period is None and entity_name is not None:
        entity = entities[entity_name]
        period = max(entity.output_index.keys())
    eval_ctx = EvaluationContext(simulation, entities, globals_data, period,
                                 entity_name)
    try:
        c = Console(eval_ctx)
        c.run()
    finally:
        h5in.close()
        if h5out is not None:
            h5out.close()
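
A brief usage sketch: the extension check above decides how the path is treated (both file names below are placeholders):

explore('output/population.h5')    # '.h5'/'.hdf5' -> opened as a data file
explore('model/console_test.yml')  # anything else -> loaded as a simulation description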
Example #7
def sim_from_file(filename):
    """
    The purpose of this function is to:

    + open a file containing a pickled dictionary of input values to a simulation,

    + initialize the objects which the corresponding `py_qcode.Simulation` takes as input,
    
    + run the simulation, and 

    + save the results to a file of the same name as the input, with a different extension.  
    """
    #Obsolete, scavenging code for pickle-independent implementation
    #(you can't pickle functions).
    with open(filename,'r') as phil:
        sim_dict = pkl.load(phil)
    sim = Simulation(**sim_dict)
    sim.run()

    split_name = filename.split('.')
    try:
        file_prefix, file_ext = split_name
    except ValueError:
        raise ValueError('Filenames are assumed to be of the form'+\
        ' "prefix.ext".')

    output_name = '.'.join([file_prefix, 'out'])

    sim.save(output_name)
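
A minimal sketch of the intended call, assuming an input dictionary was pickled to disk beforehand; the file name is a placeholder:

# Expects a pickled dict of Simulation inputs in 'toric_inputs.ext';
# results are written to 'toric_inputs.out'.
sim_from_file('toric_inputs.ext')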
Example #8
def explore(fpath):
    _, ext = splitext(fpath)
    ftype = 'data' if ext in ('.h5', '.hdf5') else 'simulation'
    print("Using {} file: '{}'".format(ftype, fpath))
    if ftype == 'data':
        globals_def, entities = entities_from_h5(fpath)
        simulation = Simulation(globals_def, None, None, None, None,
                                entities.values(), 'h5', fpath, None)
        period, entity_name = None, None
    else:
        simulation = Simulation.from_yaml(fpath)
        # use output as input
        simulation.data_source = H5Source(simulation.data_sink.output_path)
        period = simulation.start_period + simulation.periods - 1
        entity_name = simulation.default_entity
    dataset = simulation.load()
    data_source = simulation.data_source
    data_source.as_fake_output(dataset, simulation.entities_map)
    data_sink = simulation.data_sink
    entities = simulation.entities_map
    if entity_name is None and len(entities) == 1:
        entity_name = entities.keys()[0]
    if period is None and entity_name is not None:
        entity = entities[entity_name]
        period = max(entity.output_index.keys())
    eval_ctx = EvaluationContext(simulation, entities, dataset['globals'],
                                 period, entity_name)
    try:
        c = Console(eval_ctx)
        c.run()
    finally:
        data_source.close()
        if data_sink is not None:
            data_sink.close()
Example #9
def square_toric_code_sim(size, error_rate, n_trials, filename):
    """
    This function is square in more than one sense; it does everything
    the most vanilla way possible, and it uses a square grid to define 
    the torus. You put in an integer size, an error rate and a number
    of trials to execute, and it produces a pickled dict containing 
    the input to a simulation object in a file.
    """
    
    sim_lattice = SquareLattice((size,size))
    sim_dual_lattice = SquareLattice((size,size), is_dual=True)
    sim_model = depolarizing_model(error_rate)
    sim_code = toric_code(sim_lattice, sim_dual_lattice)
    sim_decoder = mwpm_decoder(sim_lattice, sim_dual_lattice)
    sim_log_ops = toric_log_ops((size,size))

    sim_keys = ['lattice', 'dual_lattice', 'error_model', 'code', 
                            'decoder', 'logical_operators', 'n_trials']

    sim_values = [sim_lattice, sim_dual_lattice, sim_model, sim_code, 
                                sim_decoder, sim_log_ops, n_trials]
    
    sim_dict = dict(zip(sim_keys, sim_values))

    sim = Simulation(**sim_dict)
    sim.run()
    sim.save(filename + '.sim')
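
A hedged usage sketch for the helper above; the argument values are purely illustrative:

# 1000 trials on a 16x16 toric code under 1% depolarizing noise;
# the run is saved to 'toric_d16_p01.sim'.
square_toric_code_sim(size=16, error_rate=0.01, n_trials=1000,
                      filename='toric_d16_p01')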
Example #10
def basic_test():
	test_geom = Geometry(1,1,[Rod(0,0,Dielectric(11.8), \
	occupancy_radius(0.3,1))])
	sim = Simulation('basic_test', test_geom)
	sim.run_simulation()
	sim.post_process()
	return True
Example #11
    def test_single_cell(cls):
        """Simulates a single superficial pyramidal regular spiking
        cell and plots the Vm and [Ca2+]"""

        config.LOGGER.info("/**************************************************************************")
        config.LOGGER.info(" *")
        config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
        config.LOGGER.info(" *")
        config.LOGGER.info(" **************************************************************************/")
        sim = Simulation(cls.__name__)
        mycell = SupPyrRS(SupPyrRS.prototype, sim.model.path + "/SupPyrRS")
        config.LOGGER.info('Created cell: %s' % (mycell.path))
        vm_table = mycell.comp[SupPyrRS.presyn].insertRecorder('Vm_suppyrrs', 'Vm', sim.data)
        pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=3e-10, firstDelay=50e-3, firstWidth=50e-3)

        sim.schedule()
        if mycell.has_cycle():
            config.LOGGER.warning("WARNING!! CYCLE PRESENT IN CICRUIT.")
        t1 = datetime.now()
        sim.run(200e-3)
        t2 = datetime.now()
        delta = t2 - t1
        if config.has_pylab:
            mus_vm = config.pylab.array(vm_table) * 1e3
            mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
            try:
                nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
                nrn_t = nrn_vm[:, 0]
                nrn_vm = nrn_vm[:, 1]
                config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
            except IOError:
                print 'NEURON Data not available.'
            config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
            config.pylab.legend()
            config.pylab.show()
Example #12
    def do_the_walk(self):
        sim_param = self.sim_parameters

        # current insertion genes: (new genes)
        current_insertion_gene = sim_param.num_genes + 1

        # start genome:
        current_genome = Genome.identity(sim_param.num_genes, sim_param.num_chr, name="G_0")

        # add copy number information to track orthologous/paralogous, when duplications are present:
        for chromosome in current_genome.chromosomes:
            chromosome.copy_number = [1] * len(chromosome.gene_order)
        current_copy_number = current_genome.gene_count()

        # do some pre-dups if necessary:
        if sim_param.pre_duplications > 0:
            for i in range(sim_param.pre_duplications):
                Simulation.apply_random_segmental_duplication(current_genome, range(1, sim_param.duplication_length+1), current_copy_number)

        self.genomes.append(current_genome)

        for key in RandomWalk.number_of_events:
            self.events[key].append(0)

        # walk:
        for step in range(self.length):
            # apply random event on current;
            current_genome = current_genome.clone("G_%d" % (step+1))
            n_rearrangements, n_insertions, n_deletions, n_duplications, current_insertion_gene = \
                Simulation.apply_random_events(sim_param, current_genome, self.steps, current_insertion_gene, current_copy_number)
            for key, value in zip(RandomWalk.number_of_events,
                                  [self.steps, n_rearrangements, n_insertions, n_deletions, n_duplications]):
                self.events[key].append(value)
            self.genomes.append(current_genome)
Example #13
 def skip_test_handling_page_table_full(self):
     Simulation._load_memory_accesses_from_file = stub_empty_method
     Simulation._load_replacement_algorithm = stub_empty_method
     s = Simulation('dummy path', 'dummy alg', 4)
     a = MemoryAccess('00000000', 'R', 0)
     e = PageTableFullException()
     s._handle_page_fault(e, a)
     assert s.page_fault_counter == 1
Example #14
 def run_simulations(self):
     """Run NUM_SIMULATIONS simulations"""
     self.results = []
     append = self.results.append
     for _ in xrange(self.num_simulations):
         simulation = Simulation(self.num_simulations, self.attrition, self.iterations_per_simulation, 
             self.promotion_bias, self.num_positions_at_level, self.bias_favors_this_gender)
         simulation.run()
         append(simulation.get_result())
Example #15
 def testPBAndDIncreaseWithLambda(self):
     print "======= TEST: PB and D increase when lambda increases ======="
     lambdas = [10, 20, 40, 80, 100]
     for lamb in lambdas:
         print "Lambda = " + str(lamb)
         sim = Simulation(0, 3, 1000, 20, 5, 30, "poisson", lamb, 0, 0)
         sim.simulate()
         print ""
     print "============================================================="
Example #16
File: pwscf.py Project: jyamu/qmc
 def __init__(self,**sim_args):
     has_group_atoms = 'group_atoms' in sim_args
     group_atoms = True
     if has_group_atoms:
         group_atoms = sim_args['group_atoms']
         del sim_args['group_atoms']
     #end if
     Simulation.__init__(self,**sim_args)
     if group_atoms and isinstance(self.system,PhysicalSystem):
         self.system.structure.group_atoms()
Example #17
 def __init__(self,**sim_args):
     has_group_atoms = 'group_atoms' in sim_args
     group_atoms = False
     if has_group_atoms:
         group_atoms = sim_args['group_atoms']
         del sim_args['group_atoms']
     #end if
     Simulation.__init__(self,**sim_args)
     if group_atoms and isinstance(self.system,PhysicalSystem):
         self.warn('requested grouping by atomic species, but pwscf does not group atoms anymore!')
Example #18
class Billiard:
	def new_game(self):
		self.scores = [0, 0]
		self.sim.add_ball(0, 0.5, 0.5, 0.0, 0.0)

		#build balls triangle
		for i in range(6):
			for j in range(i):
				self.sim.add_ball((i*(i-1))/2+j+1, 1.3+i*0.06, 0.5+j*0.06-i*0.03, 0.0, 0.0)
		
	def __init__(self):
		self.gui = Interface()
		self.gui.start()
		self.sim = Simulation()
		clock = pygame.time.Clock()
		self.gui.current_player = 0

		while not self.gui.done:
			current_player = self.gui.current_player

			#start new game if requested
			if self.gui.new_game_request:
				self.gui.new_game_request = False
				self.new_game()
			self.gui.balls = {}

			#has current player changed?
			if not self.gui.stable and self.sim.is_stable():
				current_player = (current_player+1)%2
				self.gui.current_player = current_player
			self.gui.stable = self.sim.is_stable()

			#update ball positions
			for label, ball in self.sim.balls.iteritems():
				self.gui.balls[label] = ball.pos

			#read shot command from interface and execute them
			if len(self.gui.shots) != 0:
				(angle, power) = self.gui.shots.pop()
				v = Conf.VMAX*power
				angle = (angle/180.0)*math.pi
				self.sim.balls[0].x_velocity = -v*math.sin(angle)/Conf.FPS
				self.sim.balls[0].y_velocity = -v*math.cos(angle)/Conf.FPS
			
			#check if player hit any pockets and update score
			res = self.sim.next_iter()
			if 0 in [p[0] for p in res]:
				self.sim.add_ball(0, 0.5, 0.5, 0.0, 0.0)
				self.scores[current_player] -= 1
			for ball, pocket in res:
				if ball != 0:
					self.scores[current_player] += 1
			self.gui.scores = self.scores

			clock.tick(Conf.FPS)
Example #19
def test_simulation_can_have_a_room_and_roombas(room):
    roomba = Roomba()
    sim = Simulation(room=room, roombas=[roomba])


    assert sim.room is room
    assert sim.roombas == [roomba]

    start_position = roomba.position  # record the starting pose so the post-step assertions have a baseline
    sim.step()

    assert roomba.position != start_position
    assert roomba.angle != 90
Example #20
def bs2d_test():
	test_geom = wheel(1,1,5,0.3,0,Dielectric('si')\
	,priority='Occupancy')
	draw_geometry(test_geom,'test_2d')
	test_kspace = KSpace(2,x_res=50,y_res=50)
	sim = Simulation('test_2d',test_geom,test_kspace,numbands=5, \
	resolution=64)
	sim.runSimulation()
	sim.postProcess()
	draw_bandstructure('test_2d',test_kspace,5,filled=False)
	return True
Example #21
 def new_simulation(self):
     simulation = Simulation()
     simulation.set_callback("error", lambda line: self.console_write(line, "error"))
     simulation.set_callback("command-failed",
         lambda sequence, command:
             self.console_write("Command [{0}/{1}] {2} cannot be executed".format(
                                     command,
                                     sequence.get_commands_size(),
                                     sequence.commands[command]),
                                "error"))
     return simulation
Example #22
def evaluate_controller(controller):
    """
    Gives a score to the controller based on its landing
    performance, determined by the simulation outcome and
    the time elapsed.
    """
    sim = Simulation(controller)
    success, time = sim.simulate()
    if success:
        return time
    else:
        return -(time + 100)
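
A small sketch of how the score might be used to compare candidates; the candidate list is hypothetical, and it assumes a higher score is better:

# Hypothetical candidates; keep the best-scoring controller.
candidates = [controller_a, controller_b, controller_c]   # placeholders
best = max(candidates, key=evaluate_controller)           # assumes higher score == better landing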
Example #23
 def run_simulations(self):
     """Run NUM_SIMULATIONS simulations"""
     self.results = []
     for i in range(self.num_simulations):
         simulation = Simulation(
             self.num_simulations,
             self.attrition,
             self.iterations_per_simulation,
             self.promotion_bias,
             self.num_positions_list,
         )
         simulation.run()
         self.results.append(simulation.get_result())
Example #24
def find_distance(start_speed, end_speed):
    """
    Find the distance needed to slow down from start_speed to
    end_speed using maximum engine output.
    """
    sim = Simulation(None)
    sim.position = 0.0
    sim.speed = start_speed

    while sim.speed > end_speed:
        sim.step_simulate(1.0)
        # print 'position = %f  speed = %f' % (sim.position, sim.speed)

    return abs(sim.position)
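
A usage sketch that tabulates braking distances for a few illustrative speed pairs (units are whatever the Simulation uses internally):

# Illustrative speed values only.
for start, end in [(10.0, 0.0), (20.0, 5.0), (30.0, 10.0)]:
    print('from %.1f to %.1f: %.2f distance units' % (start, end, find_distance(start, end)))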
Example #25
def state_rep_callback():
    global ball, hole, robot_front, robot_back

    fieldstate = Simulation([], ball, [], (640, 480))
    fieldstate.golf_hole = hole
    robot.pos = (robot_front.pos[0] + robot_back.pos[0])/2, (robot_front.pos[1]+robot_back.pos[1])/2
    robot.orientation = atan2(robot_front.pos[1] - robot_back.pos[1], robot_front.pos[0] - robot_back.pos[0])
    
    print "Ball pos: " + str(ball.pos)
    print "Ball speed: "+ str(ball.speed)

    move = ai.get_move(fieldstate)
    print move
    
    ai_server.move(move[0], move[1])
Example #26
def build_bpca(num_pcles=1024, radius=0.5, overlap=None, output=True):
    """
    Build a simple ballistic particle cluster aggregate by generating particles one at
    a time and allowing each to stick where it first intersects another particle.

     If overlap= is set to a value between 0 and 1, monomers will be allowed to overlap
     by 0.5*overlap*(radius1+radius2).
     """

     # TODO: add various radius distribution options (i.e. polydisperse)

    if overlap is not None:
        if (overlap<0.) or (overlap>1.):
            print('ERROR: overlap must be either None, or 0<overlap<1')
            return None

    sim = Simulation(max_pcles=num_pcles, debug=debug)
    sim.add( (0.,0.,0.), radius)

    # generate a "proposed" particle and trajectory, and see where it intersects the
    # aggregate. add the new particle at this point!
    for n in range(num_pcles-1):

        success = False
        while not success:

            if output: print('Generating particle %d of %d' % (n+2, num_pcles), end='\r')

            first = random_sphere() * max(sim.farthest() * 2.0, radius *4.)
            second = random_sphere() * max(sim.farthest() * 2.0, radius *4.)
            direction = (second - first)
            direction = direction/np.linalg.norm(direction)
            ids, hit = sim.intersect(first, direction, closest=True)
            if hit is None: continue

            # shift the origin along the line from the particle centre to the intersect
            new = hit + (hit-sim.pos[np.where(sim.id==ids)[0][0]])

            # Add to the simulation, checking for overlap with existing particles (returns False if overlap detected)
            success = sim.check(new, radius)
            if not success: continue

            # if requested, move the monomer back an amount
            if overlap is not None:
                new = hit + (hit-sim.pos[ids])*(1.-overlap)

            sim.add(new, radius)


            # if proposed particle is acceptable, add to the sim and carry on
            if success & debug: print('Adding particle at distance %f' % np.linalg.norm(hit))

    return sim
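
A minimal sketch of calling the builder above. Note that build_bpca references a module-level 'debug' flag that is not shown in the snippet, so it is defined here as an assumption; the .pos attribute is likewise assumed from its use inside the function:

debug = False  # assumed module-level flag referenced inside build_bpca
agg = build_bpca(num_pcles=128, radius=0.5, overlap=0.1)
if agg is not None:
    print('aggregate built with %d stored positions' % len(agg.pos))  # .pos assumed from usage above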
Example #27
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        uic.loadUi('ui/simulation.ui', self)

        self.sim = Simulation(400, negotiate_interval=90, submit_interval=200)
        self.sim.add_jobs()
        self.sim.farm.groups.update_quota(self.sim.farm)

        self.timer = QtCore.QTimer(self)
        self.timer.timeout.connect(self.advance_interval)

        self.quitBtn.clicked.connect(self.close)
        self.stepBtn.clicked.connect(self.advance_interval)
        self.radioDepthFirst.toggled.connect(self.toggle_fill_algorithm)
        self.radioBreadthFirst.toggled.connect(self.toggle_fill_algorithm)
        self.startStop.clicked.connect(self.toggle_run)
        self.simspeedSlider.valueChanged.connect(self.change_speed)
        self.to_plot = set(g.name for g in self.sim.farm.groups.active_groups())
        self.all_groups = self.sim.display_order()

        # ms between firing timer
        self.period = 350
        self.auto_run = False

        self.qedit = ManageQueues(self.sim)
        self.quitBtn.clicked.connect(self.qedit.close)
        self.toolButton.clicked.connect(self.qedit.show)
        self.make_status_layout()
Example #28
def parse_sims(folders, algorithm_list, parameters, sim_group, calc_dcj_distance=False, parse_time=False):

    # if algorithm_list is None:
    #     # TODO: auto-detect?
    #     pass
    all_results = collections.defaultdict(lambda: collections.defaultdict(list))
    time_results = collections.defaultdict(lambda: collections.defaultdict(list))
    for idx,folder in enumerate(folders):
        sim = Simulation.open_folder(folder)
        # set tree distances:
        algorithms.set_all_tree_distances(sim.sim_tree)
        # define simulation parameter label:
        sim.sim_parameters.folder = folder
        sim.sim_parameters.idx = idx
        # indel perc
        sim.sim_parameters.indel_p = sim.sim_parameters.insertion_p + sim.sim_parameters.deletion_p
        sim_label = sim_group % sim.sim_parameters.__dict__

        try:
            for alg in algorithm_list:

                label, method, location = alg.split(",")
                # Get ancestral genomes:
                reconstructed = file_ops.load_ancestral_genomes(folder, method, location)

                algo_res = get_ancestral_stats(sim.ancestral_genomes, reconstructed, sim.sim_tree,
                                               calc_tree_distances=True, calc_dcj_distance=calc_dcj_distance)
                if parse_time:
                    # convention: timefile is method.time, for instance, "ringo.time" or "mgra.time"
                    time_results[label][sim_label].append(parse_filetime(os.path.join(folder,location,"%s.time" % method.lower())))
                all_results[label][sim_label].extend(algo_res)
        except (RuntimeError,KeyError,IOError):
            print >> sys.stderr, "Results not present for all methods on folder %s, skipping..." % folder

    return all_results, time_results
Example #29
def generate_vasp(**kwargs):
    sim_args, inp_args = Simulation.separate_inputs(kwargs, copy_pseudos=False)

    sim_args.input = generate_vasp_input(**inp_args)
    vasp = Vasp(**sim_args)

    return vasp
Example #30
def gen_properties(**kwargs):

    if 'systype' not in kwargs:
        error('systype is a required input','gen_properties')
    #end if
    systype = kwargs['systype']
    del kwargs['systype']

    if systype=='molecule_qmc':
        text = molecule_text
    elif systype=='periodic_qmc':
        text = periodic_text
    else:
        error('invalid systype encountered\nsystype provided: {0}\nvalid options are: molecule_qmc, periodic_qmc'.format(systype))
    #end if

    sim_args,inp_args = Simulation.separate_inputs(kwargs)
    if len(inp_args)>0:
        error('invalid arguments encountered\ninvalid keywords: {0}'.format(sorted(inp_args.keys())),'gen_properties')
    #end if
    if not 'input' in sim_args:
        sim_args.input = input_template_dev(text=text.strip())
    #end if
    prop = Properties_1(**sim_args)
    return prop
Example #31
from simulation import Simulation

f = open("data.dat", 'w')
sum_balance = 0
min_balance = 0
min_running_balance = 0
for i in range(1000):
    sim = Simulation(8)
    res = sim.run()
    sum_balance += res
    min_balance = min(min_balance, res)
    min_running_balance = min(min_running_balance, sum_balance)
    f.write(str(res) + "\n")

print("Final balance: " + str(sum_balance))
#print("Min running balance: " + str(min_running_balance))
#print("Min balance: " + str(min_balance))
Example #32
packages_needed = ['matplotlib', 'scipy']
for pack in packages_needed:
    check_if_pack_installed(pack)

from simulation import Simulation

# Simulation params
params = dict()
params['N'] = 100
params['w'] = 0.2
params['burn_in'] = 2500
params['rnd_seed'] = 10378

# Create and init simulation
sim = Simulation()
sim.set_params(params)

run_setup = dict()
run_setup['mc_steps'] = 20000
run_setup['prior_dist'] = 'gaussian'
run_setup['prop_dist'] = 'gaussian_all'

# RUN #1

# Use of prior distribution
sim.init_use_prior_pdf(run_setup['prior_dist'])
# Run the simulation a certain number of steps
start = time.time()
sim.run(run_setup['mc_steps'], run_setup['prop_dist'])
end = time.time()
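
The elapsed wall time is measured but never reported in the excerpt; a short, hedged continuation could summarize run #1 (this line is not part of the original script):

# Report the duration of RUN #1 using the timestamps captured above.
print('RUN #1: {} MC steps in {:.1f} s'.format(run_setup['mc_steps'], end - start))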
Example #33
def TriHoles2D(material,
               radius,
               numbands=8,
               k_interpolation=11,
               resolution=32,
               mesh_size=7,
               runmode='sim',
               num_processors=2,
               save_field_patterns=True,
               convert_field_patterns=True,
               containing_folder='./',
               job_name_suffix='',
               bands_title_appendix='',
               custom_k_space=None,
               modes=('te', 'tm')):
    """Create a 2D MPB Simulation of a triangular lattice of holes.

    :param material: can be a string (e.g. SiN,
    4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
    value (float)
    :param radius: the radius of holes in units of the lattice constant
    :param numbands: number of bands to calculate
    :param k_interpolation: number of the k-vectors between every two of
    the used high symmetry points Gamma, M, K and Gamma again, so the
    total number of simulated k-vectors will be 3*k_interpolation + 4.
    Only used if no custom_k_space is provided.
    :param resolution: described in MPB documentation
    :param mesh_size: described in MPB documentation
    :param runmode: can be one of the following:
        ''       : just create and return the simulation object
        'ctl'    : create the sim object and save the ctl file
        'sim' (default): run the simulation and do all postprocessing
        'postpc' : do all postprocessing; simulation should have run
                   before!
        'display': display all pngs done during postprocessing. This is
                   the only mode that is interactive.
    :param num_processors: number of processors used during simulation
    :param save_field_patterns: indicates whether field pattern h5 files
    are generated during the simulation (at points of high symmetry)
    :param convert_field_patterns: indicates whether field pattern h5
    files should be converted to png (only when postprocessing)
    :param containing_folder: the path to the folder which will contain
    the simulation subfolder.
    :param job_name_suffix: Optionally specify a job_name_suffix
    (appendix to the folder name etc.) which will be appended to the
    jobname created automatically from the most important parameters.
    :param bands_title_appendix: will be added to the title of the bands
    diagram.
    :param custom_k_space: By default, KSpaceTriangular with
    k_interpolation interpolation steps is used. Provide any KSpace
    object here to customize this. k_interpolation will then be ignored.
    :param modes: a list of modes to run. Possible are 'te' and 'tm'.
    Default: both
    :return: the Simulation object

    """
    mat = Dielectric(material)

    geom = Geometry(width=1,
                    height=1,
                    triangular=True,
                    objects=[Rod(x=0, y=0, material='air', radius=radius)])

    if isinstance(custom_k_space, KSpace):
        kspace = custom_k_space
    else:
        kspace = KSpaceTriangular(k_interpolation=k_interpolation,
                                  use_uniform_interpolation=defaults.newmpb)

    # points of interest: (output mode patterns at these points)
    if save_field_patterns:
        poi = kspace.points()[0:-1]
    else:
        poi = []

    runcode = ''
    for mode in modes:
        if mode == 'te':
            outputfunc = ' '.join(defaults.output_funcs_te)
        else:
            outputfunc = ' '.join(defaults.output_funcs_tm)
        runcode += ('(run-%s %s)\n' %
                    (mode, defaults.default_band_func(poi, outputfunc)) +
                    '(print-dos 0 1.2 121)\n\n')

    jobname = 'TriHoles2D_{0}_r{1:03.0f}'.format(mat.name, radius * 1000)

    sim = Simulation(jobname=jobname + job_name_suffix,
                     geometry=geom,
                     kspace=kspace,
                     numbands=numbands,
                     resolution=resolution,
                     mesh_size=mesh_size,
                     initcode=defaults.default_initcode +
                     '(set! default-material {0})'.format(str(mat)),
                     postcode='',
                     runcode=runcode,
                     work_in_subfolder=path.join(containing_folder,
                                                 jobname + job_name_suffix),
                     clear_subfolder=runmode.startswith('s')
                     or runmode.startswith('c'))

    draw_bands_title = ('2D hex. PhC; {0}, radius={1:0.3f}'.format(
        mat.name, geom.objects[0].radius) + bands_title_appendix)

    return do_runmode(
        sim,
        runmode,
        num_processors,
        draw_bands_title,
        plot_crop_y=True,  # automatic cropping
        convert_field_patterns=convert_field_patterns,
        field_pattern_plot_filetype=defaults.field_dist_filetype,
        # don't add gamma point a second time (index 3):
        field_pattern_plot_k_selection=None,
        x_axis_hint=[defaults.default_x_axis_hint,
                     kspace][kspace.has_labels()])
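
A hedged usage sketch based on the docstring above; the material name, radius, and suffix are illustrative values only:

# Triangular lattice of air holes in SiN with r/a = 0.34, run and post-processed.
sim = TriHoles2D('SiN', radius=0.34, numbands=8,
                 runmode='sim', job_name_suffix='_r034')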
Example #34
    parser = argparse.ArgumentParser(description='Computes parameter sensitivity')
    parser.add_argument('step', type=str, nargs='?', default='all', choices=['sim', 'sen', 'all'], help='Step name (sim - run simulations, sen - compute sensitivity)')
    parser.add_argument('-n', '--name', type=str, nargs=1, help='Analysis name')

    params = parser.parse_args()
    experiment_name = None if params.name is None else params.name[0]

    if params.step in ['all', 'sim']:
        print('Generating parameter sets')
        param_values = saltelli.sample(problem, 1000, calc_second_order=True)

        inputs = param_values_to_inputs(param_values)

        results = []
        for i in tqdm(iterable=inputs, unit='simulation', desc='Running simulations'):
            sim = Simulation(kwargs=i)

            sim.Config.quiet = True
            sim.Config.visualise = False
            sim.Config.verbose = False
            sim.Config.simulation_steps = 1000
            sim.Config.print_summary = False

            result = sim.run()
            result_dict = {f'r_{k}': v for k, v in result.items()}
            param_dict = {f'p_{k}': v for k, v in i.items()}

            results.append({**param_dict, **result_dict})

        df = pd.DataFrame(data = results)
        print('Saving simulations results')
Example #35
from simulation import Simulation
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
import config

config.use_ticks = True
config.DRAW = False
env = Simulation()
# Optional: PPO2 requires a vectorized environment to run
# the env is now wrapped automatically when passing it to the constructor
# env = DummyVecEnv([lambda: env])

model = PPO2(MlpPolicy, env, verbose=1)
model.learn(total_timesteps=10000)

obs = env.reset()
for i in range(1000):
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    env.render()

env.close()
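
An optional follow-up: stable-baselines models can be persisted with save()/load(), which may be convenient after the training loop above (the file name is a placeholder):

# Save the trained policy; it can later be restored without retraining.
model.save("ppo2_simulation")
# model = PPO2.load("ppo2_simulation", env=env)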
Example #36
from simulation import Simulation

sim = Simulation()
sim.pygame_setup()
sim.create_tank()
sim.run_simulation()
Example #37
from simulation import Simulation
from setting import *
from statistics import *

if __name__ == "__main__":
    params = ParameterProvider.NORMAL

    sim = Simulation(
        Setting(**params),
        Statistics(),
    )

    sim.Run()
Example #38
def clear_all_sims():
    from simulation import Simulation
    Simulation.clear_all_sims()
Example #39
                                     self.id)
                    mensajes_enviados += 1
                    self.transmit(newevent)

# ----------------------------------------------------------------------------------------
# "main()"
# ----------------------------------------------------------------------------------------
# builds an instance of the Simulation class, taking as parameters the name of the
# file that encodes the graph's adjacency list and the maximum simulation time

if len(sys.argv) != 2:
    print("Por favor proporcione el nombre de la grafica de comunicaciones")
    raise SystemExit(1)

experiment = Simulation(sys.argv[1], 100)

# associate a process/model pair with each node of the graph
for i in range(1, len(experiment.graph) + 1):
    m = AlgorithmDFS()
    experiment.setModel(m, i)

# insert a seed event into the agenda and start the run
inicio = default_timer()
seed = Event("DESCUBRE", 0.0, 1, 1)
mensajes_enviados += 1
experiment.init(seed)
experiment.run()
fin = default_timer()
print("\tTiempo del programa = ", fin - inicio)
print("\tMensajes enviados en total = ", mensajes_enviados, "\n")
Example #40
dyy = dyy**2
# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>. Connection Done >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>..
connection_list = cortical_to_cortical_connection(background_population_dict,
                                                  internal_population_dict,
                                                  DEE, DIE, DEI, DII, LEE, LIE,
                                                  LEI, LII, fE, fI, 0.0)
"""
"""
tau_d = 128.0
simulation = Simulation(population_list,
                        connection_list,
                        Net_settings,
                        Cell_type_num,
                        DEE + LEE / tau_d,
                        DIE + LIE / tau_d,
                        DEI + LEI / tau_d,
                        DII + LII / tau_d,
                        LEE,
                        LIE,
                        LEI,
                        LII,
                        tau_d,
                        verbose=True)
(mEbin_ra, mIbin_ra, rEbin_ra, rIbin_ra, P_MFEbin_ra, NMDAEbin_ra, NMDAIbin_ra, VEavgbin_ra,
 VIavgbin_ra, VEstdbin_ra, VIstdbin_ra, \
 VEmubin_ra, VImubin_ra, VEsigbin_ra, VIsigbin_ra) = simulation.update(t0=t0, dt=dt, tf=tf)
ISOTIMEFORMAT = '%Y%m%d%H%M%S'
fileparamname = str(time.strftime(ISOTIMEFORMAT)) + '_paramsLI.mat'
scio.savemat(
    fileparamname, {
        'DEE': DEE,
        'DEI': DEI,
Example #41
    def test_kernl_reduce_rflo(self):
        """Verifies that KeRNL reduces to RFLO in special case.

        If beta is initialized to the identity while the gammas are all
        initialized to the network inverse time constant alpha, and the KeRNL
        optimizer has 0 learning rate (i.e. beta and gamma do not change), then
        KeRNL should produce the same gradients as RFLO if the approximate
        KeRNL of (1 - alpha) (rather than exp(-alpha)) is used."""

        self.task = Add_Task(4, 6, deterministic=True, tau_task=2)
        self.data = self.task.gen_data(100, 0)

        alpha = 0.3

        self.rnn_1 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_2 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        #RFLO
        np.random.seed(1)
        self.optimizer_1 = Stochastic_Gradient_Descent(lr=0.001)
        self.learn_alg_1 = RFLO(self.rnn_1, alpha)
        #KeRNL with beta and gamma fixed to RFLO values
        np.random.seed(1)
        self.optimizer_2 = Stochastic_Gradient_Descent(lr=0.001)
        self.KeRNL_optimizer = Stochastic_Gradient_Descent(lr=0)
        A = np.eye(self.rnn_2.n_h)
        alpha_i = np.ones(self.rnn_2.n_h) * alpha
        self.learn_alg_2 = KeRNL(self.rnn_2,
                                 self.KeRNL_optimizer,
                                 A=A,
                                 alpha=alpha_i)

        monitors = []

        np.random.seed(2)
        self.sim_1 = Simulation(self.rnn_1)
        self.sim_1.run(self.data,
                       learn_alg=self.learn_alg_1,
                       optimizer=self.optimizer_1,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(2)
        self.sim_2 = Simulation(self.rnn_2)
        self.sim_2.run(self.data,
                       learn_alg=self.learn_alg_2,
                       optimizer=self.optimizer_2,
                       monitors=monitors,
                       verbose=False)

        #Assert networks learned the same weights
        assert_allclose(self.rnn_1.W_rec, self.rnn_2.W_rec)
        #Assert networks' parameters changed appreciably, despite a large
        #tolerance for closeness.
        self.assertFalse(np.isclose(self.W_rec, self.rnn_2.W_rec).all())
Example #42
class Test_KeRNL(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.W_in = np.eye(2)
        cls.W_rec = np.eye(2)
        cls.W_out = np.eye(2)
        cls.b_rec = np.zeros(2)
        cls.b_out = np.zeros(2)

        cls.rnn = RNN(cls.W_in,
                      cls.W_rec,
                      cls.W_out,
                      cls.b_rec,
                      cls.b_out,
                      activation=identity,
                      alpha=1,
                      output=softmax,
                      loss=softmax_cross_entropy)

        cls.rnn.h = np.ones(2)
        cls.rnn.a = np.ones(2)
        cls.rnn.a_prev = np.ones(2)
        cls.rnn.x = np.ones(2) * 2
        cls.rnn.error = np.ones(2) * 0.5
        cls.rnn.y_prev = np.ones(2) * 0.5
        cls.rnn.y = np.ones(2) * 2

    def test_update_learning_vars(self):

        optimizer = Stochastic_Gradient_Descent(lr=1)
        self.learn_alg = KeRNL(self.rnn, optimizer, sigma_noise=1)
        self.learn_alg.zeta = np.array([0.1, 0.1])
        self.rnn.next_state(self.rnn.x)
        self.learn_alg.update_learning_vars()

        noise_error = self.learn_alg.zeta - np.array([0.1, 0.1])
        A_grads = np.multiply.outer(noise_error, self.learn_alg.zeta)

        correct_alpha = np.array([0.8, 0.8])
        correct_A = np.eye(2) - A_grads
        correct_B = np.array([[1, 1, 2, 2, 1], [1, 1, 2, 2, 1]])

        assert_allclose(self.learn_alg.alpha, correct_alpha)
        assert_allclose(self.learn_alg.A, correct_A)
        assert_allclose(self.learn_alg.B, correct_B)

    def test_get_rec_grads(self):

        A = np.array([[1, 2], [1, 2]])
        B = np.array([[1, 2], [0, 2]])
        optimizer = Stochastic_Gradient_Descent(lr=1)
        self.learn_alg = KeRNL(self.rnn, optimizer, sigma_noise=1, A=A, B=B)
        self.learn_alg.q = np.array([0.5, 0.5])
        rec_grads = self.learn_alg.get_rec_grads()

        correct_rec_grads = np.array([[1, 2], [0, 4]])

        assert_allclose(rec_grads, correct_rec_grads)

    def test_kernl_reduce_rflo(self):
        """Verifies that KeRNL reduces to RFLO in special case.

        If beta is initialized to the identity while the gammas are all
        initialized to the network inverse time constant alpha, and the KeRNL
        optimizer has 0 learning rate (i.e. beta and gamma do not change), then
        KeRNL should produce the same gradients as RFLO if the approximate
        KeRNL of (1 - alpha) (rather than exp(-alpha)) is used."""

        self.task = Add_Task(4, 6, deterministic=True, tau_task=2)
        self.data = self.task.gen_data(100, 0)

        alpha = 0.3

        self.rnn_1 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_2 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        #RFLO
        np.random.seed(1)
        self.optimizer_1 = Stochastic_Gradient_Descent(lr=0.001)
        self.learn_alg_1 = RFLO(self.rnn_1, alpha)
        #KeRNL with beta and gamma fixed to RFLO values
        np.random.seed(1)
        self.optimizer_2 = Stochastic_Gradient_Descent(lr=0.001)
        self.KeRNL_optimizer = Stochastic_Gradient_Descent(lr=0)
        A = np.eye(self.rnn_2.n_h)
        alpha_i = np.ones(self.rnn_2.n_h) * alpha
        self.learn_alg_2 = KeRNL(self.rnn_2,
                                 self.KeRNL_optimizer,
                                 A=A,
                                 alpha=alpha_i)

        monitors = []

        np.random.seed(2)
        self.sim_1 = Simulation(self.rnn_1)
        self.sim_1.run(self.data,
                       learn_alg=self.learn_alg_1,
                       optimizer=self.optimizer_1,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(2)
        self.sim_2 = Simulation(self.rnn_2)
        self.sim_2.run(self.data,
                       learn_alg=self.learn_alg_2,
                       optimizer=self.optimizer_2,
                       monitors=monitors,
                       verbose=False)

        #Assert networks learned the same weights
        assert_allclose(self.rnn_1.W_rec, self.rnn_2.W_rec)
        #Assert networks' parameters changed appreciably, despite a large
        #tolerance for closeness.
        self.assertFalse(np.isclose(self.W_rec, self.rnn_2.W_rec).all())
Example #43
    def test_small_lr_case(self):

        alpha = 1

        self.rnn_1 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_2 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_3 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        lr = 0.00001
        self.optimizer_1 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_1 = RTRL(self.rnn_1)
        self.optimizer_2 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_2 = Future_BPTT(self.rnn_2, 25)
        self.optimizer_3 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_3 = Efficient_BPTT(self.rnn_3, 100)

        monitors = []

        np.random.seed(1)
        self.sim_1 = Simulation(self.rnn_1)
        self.sim_1.run(self.data,
                       learn_alg=self.learn_alg_1,
                       optimizer=self.optimizer_1,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(1)
        self.sim_2 = Simulation(self.rnn_2)
        self.sim_2.run(self.data,
                       learn_alg=self.learn_alg_2,
                       optimizer=self.optimizer_2,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(1)
        self.sim_3 = Simulation(self.rnn_3)
        self.sim_3.run(self.data,
                       learn_alg=self.learn_alg_3,
                       optimizer=self.optimizer_3,
                       monitors=monitors,
                       verbose=False)

        #Assert networks learned similar weights with a small tolerance.
        assert_allclose(self.rnn_1.W_rec, self.rnn_2.W_rec, atol=1e-4)
        assert_allclose(self.rnn_2.W_rec, self.rnn_3.W_rec, atol=1e-4)
        #But that there was some difference from initialization
        self.assertFalse(
            np.isclose(self.rnn_1.W_rec, self.W_rec, atol=1e-4).all())
Example #44
import sys, os
root_dir = os.getcwd()
sub_dirs = [x[0] for x in os.walk(root_dir)]
sys.path.extend(sub_dirs)
from simulation import Simulation

L_box = 10.0
N_cells = 16

output_dir = '/raid/bruno/data/hydro_1D/test'

Sim = Simulation(L_box, N_cells, output_dir)

Sim.Set_Initial_Conditions()

Sim.Set_Boundaries()

Sim.Write_Snapshot()

# print( f'{Sim.mpi.id}  {Sim.density}')
# print( f'{Sim.mpi.id}  {Sim.momentum}')
# print( f'{Sim.mpi.id}  {Sim.energy}')

Sim.mpi.print('Finished Successfully ')
Example #45
def simulate_elections():
    return Simulation(parties.ALL)
Example #46
import data
from simulation import Simulation

data.sample()
a = Simulation()
a.simulation_sample()
Example #47
    def __init__(self,
                 port=8813,
                 numRetries=10,
                 host="localhost",
                 label="default"):
        #if _embedded:
        #    return self.getVersion()
        self.simulation = Simulation(self)
        self.vehicle = Vehicle(self)
        self.trafficlights = TrafficLights(self)
        self._connections = {}
        self._connections[""] = self._connections[label] = socket.socket()
        self._message = Message()
        for wait in range(numRetries):
            try:
                self._connections[label].connect((host, port))
                self._connections[label].setsockopt(socket.IPPROTO_TCP,
                                                    socket.TCP_NODELAY, 1)
                logging.info('Connected to host: ' + host)
                break
            except socket.error:
                logging.warn('Unable to connect to host: ' + host +
                             ' . Retrying...')
                time.sleep(wait)
        #return self.getVersion()

        self._modules = {
            #constants.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE: inductionloop,
            #constants.RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE:\
            #multientryexit,
            #constants.RESPONSE_SUBSCRIBE_AREAL_DETECTOR_VARIABLE: areal,
            constants.RESPONSE_SUBSCRIBE_TL_VARIABLE:
            self.trafficlights,
            #constants.RESPONSE_SUBSCRIBE_LANE_VARIABLE: lane,
            constants.RESPONSE_SUBSCRIBE_VEHICLE_VARIABLE:
            self.vehicle,
            #constants.RESPONSE_SUBSCRIBE_VEHICLETYPE_VARIABLE: vehicletype,
            #constants.RESPONSE_SUBSCRIBE_ROUTE_VARIABLE: route,
            #constants.RESPONSE_SUBSCRIBE_POI_VARIABLE: poi,
            #constants.RESPONSE_SUBSCRIBE_POLYGON_VARIABLE: polygon,
            #constants.RESPONSE_SUBSCRIBE_JUNCTION_VARIABLE: junction,
            #constants.RESPONSE_SUBSCRIBE_EDGE_VARIABLE: edge,
            constants.RESPONSE_SUBSCRIBE_SIM_VARIABLE:
            self.simulation,

            #constants.RESPONSE_SUBSCRIBE_GUI_VARIABLE: gui,

            #constants.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_CONTEXT: inductionloop,
            #constants.RESPONSE_SUBSCRIBE_MULTI_ENTRY_EXIT_DETECTOR_CONTEXT:\
            #multientryexit,
            #constants.RESPONSE_SUBSCRIBE_AREAL_DETECTOR_CONTEXT: areal,
            constants.RESPONSE_SUBSCRIBE_TL_CONTEXT:
            self.trafficlights,
            #constants.RESPONSE_SUBSCRIBE_LANE_CONTEXT: lane,
            constants.RESPONSE_SUBSCRIBE_VEHICLE_CONTEXT:
            self.vehicle,
            #constants.RESPONSE_SUBSCRIBE_VEHICLETYPE_CONTEXT: vehicletype,
            #constants.RESPONSE_SUBSCRIBE_ROUTE_CONTEXT: route,
            #constants.RESPONSE_SUBSCRIBE_POI_CONTEXT: poi,
            #constants.RESPONSE_SUBSCRIBE_POLYGON_CONTEXT: polygon,
            #constants.RESPONSE_SUBSCRIBE_JUNCTION_CONTEXT: junction,
            #constants.RESPONSE_SUBSCRIBE_EDGE_CONTEXT: edge,
            constants.RESPONSE_SUBSCRIBE_SIM_CONTEXT:
            self.simulation,

            #constants.RESPONSE_SUBSCRIBE_GUI_CONTEXT: gui,

            #constants.CMD_GET_INDUCTIONLOOP_VARIABLE: inductionloop,
            #constants.CMD_GET_MULTI_ENTRY_EXIT_DETECTOR_VARIABLE:\
            #multientryexit,
            #constants.CMD_GET_AREAL_DETECTOR_VARIABLE: areal,
            constants.CMD_GET_TL_VARIABLE:
            self.trafficlights,
            #constants.CMD_GET_LANE_VARIABLE: lane,
            constants.CMD_GET_VEHICLE_VARIABLE:
            self.vehicle,
            #constants.CMD_GET_VEHICLETYPE_VARIABLE: vehicletype,
            #constants.CMD_GET_ROUTE_VARIABLE: route,
            #constants.CMD_GET_POI_VARIABLE: poi,
            #constants.CMD_GET_POLYGON_VARIABLE: polygon,
            #constants.CMD_GET_JUNCTION_VARIABLE: junction,
            #constants.CMD_GET_EDGE_VARIABLE: edge,
            constants.CMD_GET_SIM_VARIABLE:
            self.simulation
            #constants.CMD_GET_GUI_VARIABLE: gui
        }
Example #48
        self.root = Tk()
        self.root.geometry("400x300")
        self.app = Window(self.root)
        self.root.mainloop()

    def close(self):
        self.root.quit()
        self.app.quit()

surface = Surface()

plane = Airplane(100000, 100, 15)
airplane = plane
plane.position = np.array((0., 0., 10000.))
plane.velocity = np.array((150., 150., 2.))
sim = Simulation(plane, surface)


pygame.init()
display = (800,600)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)

tx = 0
ty = 0
tz = 0
ry = 0
rz = 0

glMatrixMode(GL_PROJECTION)
gluPerspective(45, (display[0] / display[1]), 0.1, 100000.0)
Example #49
with open(question_path) as f:
    anns = json.load(f)

total, correct = 0, 0
total_expl, correct_expl = 0, 0
total_pred, correct_pred = 0, 0
total_coun, correct_coun = 0, 0

pbar = tqdm(range(5000))

for ann_idx in pbar:
    question_scene = anns[ann_idx]
    file_idx = ann_idx + 10000
    ann_path = os.path.join(raw_motion_dir, 'sim_%05d.json' % file_idx)

    sim = Simulation(ann_path, use_event_ann=(args.use_event_ann != 0))
    exe = Executor(sim)

    for q_idx, q in enumerate(question_scene['questions']):
        q_type = q['question_type']
        if q_type == 'descriptive':
            continue
        question = q['question']
        parsed_pg = parsed_pgs[str(
            file_idx)]['questions'][q_idx]['question_program']
        pred = exe.run(parsed_pg, debug=False)
        ans = q['answer']
        if pred == ans:
            correct += 1
        total += 1
        if q_type.startswith('explanatory'):
Example #50
def TriHolesSlab3D_Waveguide(material,
                             radius,
                             thickness,
                             mode='zeven',
                             numbands=8,
                             k_steps=17,
                             supercell_size=5,
                             supercell_z=6,
                             resolution=32,
                             mesh_size=7,
                             ydirection=False,
                             first_row_longitudinal_shift=0,
                             first_row_transversal_shift=0,
                             first_row_radius=None,
                             second_row_longitudinal_shift=0,
                             second_row_transversal_shift=0,
                             second_row_radius=None,
                             runmode='sim',
                             num_processors=2,
                             projected_bands_folder='../projected_bands_repo',
                             plot_complete_band_gap=False,
                             save_field_patterns_kvecs=list(),
                             save_field_patterns_bandnums=list(),
                             convert_field_patterns=False,
                             job_name_suffix='',
                             bands_title_appendix='',
                             plot_crop_y=False,
                             field_pattern_plot_k_selection=()):
    """Create a 3D MPB Simulation of a slab with a triangular lattice of
    holes, with a waveguide along the nearest neighbor direction, i.e.
    Gamma->K direction.

    The simulation is done with a cubic super cell.

    Before the waveguide simulation, additional simulations of the
    unperturbed structure will be run for projected bands data, if these
    simulations were not run before.

    :param material: can be a string (e.g. SiN,
    4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
    value (float)
    :param radius: the radius of holes in units of the lattice constant
    :param thickness: slab thickness in units of the lattice constant
    :param mode: the mode to run. Possible are 'zeven' and 'zodd'.
    :param numbands: number of bands to calculate
    :param k_steps: number of k steps along the waveguide direction
    between 0 and 0.5 to simulate. This can also be a list of explicit
    k values (scalar values for the component along the waveguide
    axis) to be simulated.
    :param supercell_size: the length of the supercell perpendicular to the
    waveguide, in units of sqrt(3) times the lattice constant. If it is
    not an odd number, one will be added.
    :param supercell_z: the height of the supercell in units of the
    lattice constant
    :param resolution: described in MPB documentation
    :param mesh_size: described in MPB documentation
    :param ydirection: set this if the waveguide should point along y,
    otherwise (default) it will point along x. Use the default if you
    want to use yparity data.
    :param first_row_longitudinal_shift: shifts the holes next to the
    waveguide by this amount, parallel to the waveguide direction.
    :param first_row_transversal_shift: shifts the holes next to the
    waveguide by this amount, perpendicular to the waveguide direction.
    :param first_row_radius: The radius of the holes next to the
    waveguide. If None (default), use radius.
    :param second_row_longitudinal_shift: shifts the holes in the second
    row next to the waveguide by this amount, parallel to the waveguide
    direction
    :param second_row_transversal_shift: shifts the holes in the second
    row next to the waveguide by this amount, perpendicular to the
    waveguide direction
    :param second_row_radius: The radius of the holes in the second row
    next to the waveguide. If None (default), use radius.
    :param runmode: can be one of the following:
        ''       : just create and return the simulation object
        'ctl'    : create the sim object and save the ctl file
        'sim' (default): run the simulation and do all postprocessing
        'postpc' : do all postprocessing; simulation should have run
                   before!
        'display': display all pngs done during postprocessing. This is
                   the only mode that is interactive.
    :param num_processors: number of processors used during simulation
    :param projected_bands_folder: the path to the folder which will
    contain the simulations of the unperturbed PhC, which is needed for
    the projections perpendicular to the waveguide direction. If the
    folder contains simulations run before, their data will be reused.
    :param plot_complete_band_gap: If this is False, the band gap will be a
    function of the k component along the waveguide. For each k,
    a simulation of the unperturbed photonic crystal will be run to get
    the data. If this is True, only one unperturbed simulation will be
    run to find the full direction-independent band gap.
    :param save_field_patterns_kvecs: a list of k-vectors (3-tuples),
    which indicates where field pattern h5 files are generated during
    the simulation (only at bands in save_field_patterns_bandnums)
    :param save_field_patterns_bandnums: a list of band numbers (int,
    starting at 1), which indicates where field pattern h5 files are
    generated during the simulation (only at k-vectors in
    save_field_patterns_kvecs)
    :param convert_field_patterns: indicates whether field pattern h5
    files should be converted to png (only when postprocessing)
    :param job_name_suffix: Optionally specify a job_name_suffix
    (appendix to the folder name etc.) which will be appended to the
    jobname created automatically from the most important parameters.
    :param bands_title_appendix: will be added to the title of the bands
    diagram.
    :param plot_crop_y: if True, the band diagrams are automatically
    cropped before the last band; alternatively, pass a number to
    specify the maximum y-value at which the plot will be cropped.
    :return: the Simulation object

    """
    mat = Dielectric(material)

    # first, make sure all data for projected bands exist, otherwise
    # start their simulations.

    unperturbed_jobname = 'TriHolesSlab_{0}_r{1:03.0f}_t{2:03.0f}'.format(
        mat.name, radius * 1000, thickness * 1000)
    # look here for old simulations, and place new ones there:
    repo = path.abspath(
        path.join(path.curdir, projected_bands_folder, unperturbed_jobname))
    # create path if not there yet:
    if not path.exists(path.abspath(repo)):
        makedirs(path.abspath(repo))

    # these k points will be simulated (along waveguide):
    if isinstance(k_steps, (int, float)):
        k_steps = int(k_steps)
        k_points = np.linspace(0, 0.5, num=k_steps, endpoint=True)
    else:
        k_points = np.array(k_steps)

    # This list will be forwarded later to this defect simulation's
    # post-process. It contains the folder paths of unperturbed
    # simulations for each k-vec of this simulation (or only one simulation,
    # if the plotted band gap does not change from k-vec to k-vec):
    project_bands_list = []

    if plot_complete_band_gap:
        if mode == 'zeven':
            # We only need a simulation of the first two bands at the M
            # and the K point to get the band gap.

            # first, see if we need to simulate:
            jobname_suffix = '_for_gap'
            jobname = unperturbed_jobname + jobname_suffix
            project_bands_list.append(path.join(repo, jobname))
            range_file_name = path.join(repo, jobname,
                                        jobname + '_' + mode + '_ranges.csv')
            if not path.isfile(range_file_name):
                # does not exist, so start simulation:
                log.info('unperturbed structure not yet simulated for '
                         'band gap. Running now...')
                kspace = KSpace(points_list=[(0, 0.5, 0),
                                             ('(/ -3)', '(/ 3)', 0)],
                                k_interpolation=0,
                                point_labels=['M', 'K'])

                sim = TriHolesSlab3D(
                    material=material,
                    radius=radius,
                    thickness=thickness,
                    custom_k_space=kspace,
                    numbands=3,  # 3 so the band plot looks better ;)
                    resolution=resolution,
                    mesh_size=mesh_size,
                    supercell_z=supercell_z,
                    runmode='sim' if runmode.startswith('s') else '',
                    num_processors=num_processors,
                    containing_folder=repo,
                    save_field_patterns=False,
                    convert_field_patterns=False,
                    job_name_suffix=jobname_suffix,
                    bands_title_appendix=', for band gap',
                    modes=[mode])

                if not sim:
                    log.error(
                        'an error occurred during simulation of unperturbed '
                        'structure. See the .out file in {0}'.format(
                            path.join(repo, jobname)))
                    return

                # Now, the _ranges.csv file is wrong, because we did not
                # simulate the full K-Space, especially Gamma is
                # missing. Correct the ranges so the first band starts
                # at 0 and the second band is the last band and goes to
                # a very high value. This way, there is only the band
                # gap left between the first and second continuum bands.

                # Load the _ranges.csv file to get the band gap:
                ranges = np.loadtxt(range_file_name, delimiter=',', ndmin=2)
                # tinker:
                ranges[0, 1] = 0
                ranges[1, 2] = ranges[1, 2] * 100
                # save file again, drop higher bands:
                np.savetxt(range_file_name,
                           ranges[:2, :],
                           header='bandnum, min, max',
                           fmt=['%.0f', '%.6f', '%.6f'],
                           delimiter=', ')
        else:
            # For high refractive indices and big radius, there are some
            # small gaps for TM modes. But we need to simulate more
            # bands and more k-points than for the TE modes. This is
            # especially difficult (or even impossible?), since
            # quasi-guided PhC bands (which narrow the band gap) are
            # hidden by continuum modes above the light line in 3D.
            # I don't need it, so it is not implemented yet:
            log.warning('plot_complete_band_gap not implemented for {0}'
                        ' modes yet.'.format(mode))

    else:
        # Note: in the following, I use a triangular lattice, which is
        # orientated such that the Gamma->K direction points towards y
        # in cartesian coordinates. If ydirection is False, it does not
        # matter, because the projected bands stay the same.

        # In the triangular lattice, in the basis of its reciprocal
        # basis vectors, this is the K' point, i.e. the boundary of the
        # first Brillouin zone in the rectangular lattice, onto which we
        # need to project (see also : Steven G. Johnson et al., "Linear
        # waveguides in photonic-crystal slabs", Phys. Rev. B, Vol. 62,
        # Nr.12, 8212-8222 (2000); page 8216 & Fig. 8):
        rectBZ_K = np.array((0.25, -0.25))
        # the M point in the triangular lattice reciprocal basis, which
        # points along +X (perpendicular to a waveguide in k_y
        # direction): (note: if k_y is greater than 1/3, we leave the
        # 1st BZ in +x direction. But this is OK and we calculate it
        # anyway, because it does not change the projection. If we want
        # to optimize calculation time some time, we could limit this.)
        triBZ_M = np.array((0.5, 0.5))

        # now, see if we need to simulate:
        for ky in k_points:
            jobname_suffix = '_projk{0:06.0f}'.format(ky * 1e6)
            jobname = unperturbed_jobname + jobname_suffix
            project_bands_list.append(path.join(repo, jobname))
            range_file_name = path.join(repo, jobname,
                                        jobname + '_' + mode + '_ranges.csv')
            if not path.isfile(range_file_name):
                # does not exist, so start simulation:
                log.info('unperturbed structure not yet simulated at '
                         'k_wg={0}. Running now...'.format(ky))
                kspace = KSpace(
                    points_list=[
                        rectBZ_K * ky * 2, rectBZ_K * ky * 2 + triBZ_M
                    ],
                    k_interpolation=15,
                )

                sim = TriHolesSlab3D(
                    material=material,
                    radius=radius,
                    thickness=thickness,
                    custom_k_space=kspace,
                    numbands=defaults.num_projected_bands,
                    resolution=resolution,
                    supercell_z=supercell_z,
                    mesh_size=mesh_size,
                    runmode='sim' if runmode.startswith('s') else '',
                    num_processors=num_processors,
                    containing_folder=repo,
                    save_field_patterns=False,
                    convert_field_patterns=False,
                    job_name_suffix=jobname_suffix,
                    bands_title_appendix=', at k_wg={0:0.3f}'.format(ky),
                    modes=[mode])

                if not sim:
                    log.error(
                        'an error occurred during simulation of unperturbed '
                        'structure. See the .out file in {0}'.format(
                            path.join(repo, jobname)))
                    return

    # If a shift is used, inversion symmetry is broken:
    if ((first_row_longitudinal_shift or second_row_longitudinal_shift)
            and 'mpbi' in defaults.mpb_call):
        log.info('default MPB to use includes inversion symmetry: '
                 '{0}. '.format(defaults.mpb_call) +
                 'Shift of holes specified, which breaks inv. symmetry. '
                 'Will fall back to MPB without inv. symm.: {0}'.format(
                     defaults.mpb_call.replace('mpbi', 'mpb')))
        defaults.mpb_call = defaults.mpb_call.replace('mpbi', 'mpb')

    # make it odd:
    if supercell_size % 2 == 0:
        supercell_size += 1
    # half of the supercell (floored):
    sch = int(supercell_size / 2)

    # Create geometry and add objects.
    objects = get_triangular_phc_waveguide_air_rods(
        radius=radius,
        supercell_size=supercell_size,
        ydirection=ydirection,
        first_row_longitudinal_shift=first_row_longitudinal_shift,
        first_row_transversal_shift=first_row_transversal_shift,
        first_row_radius=first_row_radius,
        second_row_longitudinal_shift=second_row_longitudinal_shift,
        second_row_transversal_shift=second_row_transversal_shift,
        second_row_radius=second_row_radius)

    if ydirection:
        geom = Geometry(
            width='(* (sqrt 3) %i)' % supercell_size,
            height=1,
            depth=supercell_z,
            triangular=False,
            objects=([
                Block(
                    x=0,
                    y=0,
                    z=0,
                    material=mat,
                    # make it bigger than computational cell, just in case:
                    size=('(* (sqrt 3) %i)' %
                          (supercell_size + 1), 2, thickness))
            ] + objects))
        kspaceW1 = KSpace(
            points_list=[(0, ky, 0) for ky in k_points],
            k_interpolation=0,
        )
    else:
        geom = Geometry(
            width=1,
            height='(* (sqrt 3) %i)' % supercell_size,
            depth=supercell_z,
            triangular=False,
            objects=([
                Block(
                    x=0,
                    y=0,
                    z=0,
                    material=mat,
                    # make it bigger than computational cell, just in case:
                    size=(2, '(* (sqrt 3) %i)' %
                          (supercell_size + 1), thickness))
            ] + objects))
        kspaceW1 = KSpace(
            points_list=[(kx, 0, 0) for kx in k_points],
            k_interpolation=0,
        )

    jobname = 'TriHolesSlab_W1_{0}_r{1:03.0f}_t{2:03.0f}'.format(
        mat.name, radius * 1000, thickness * 1000)

    if mode == 'zeven':
        outputfuncs = defaults.output_funcs_te
    else:
        outputfuncs = defaults.output_funcs_tm

    runcode = ''
    if defaults.newmpb:
        runcode = '(optimize-grid-size!)\n\n'

    if save_field_patterns_bandnums and save_field_patterns_kvecs:
        runcode += (
            ';function to determine whether an item x is member of list:\n'
            '(define (member? x list)\n'
            '    (cond (\n'
            '        ;false if the list is empty:\n'
            '        (null? list) #f )\n'
            '        ;true if first item (car) equals x:\n'
            '        ( (eqv? x (car list)) #t )\n'
            '        ;else, drop first item (cdr) and make recursive call:\n'
            '        ( else (member? x (cdr list)) )\n'
            '    ))\n\n' + '(define output-bands-list (list {0}))\n\n'.format(
                ' '.join(map(str, save_field_patterns_bandnums))) +
            '(define (output-func bnum)\n'
            '    (if (member? bnum output-bands-list)\n'
            '        (begin\n' + ''.join(12 * ' ' + '({0} bnum)\n'.format(func)
                                         for func in outputfuncs) +
            '        )\n'
            '    ))\n\n'
            '(run-{0} {1})\n'.format(
                mode,
                defaults.default_band_func(save_field_patterns_kvecs,
                                           'output-func')) +
            '(print-dos 0 1.2 121)\n\n')
    else:
        runcode += ('(run-{0} {1})\n'.format(
            mode, defaults.default_band_func([], None)) +
                    '(print-dos 0 1.2 121)\n\n')

    sim = Simulation(jobname=jobname + job_name_suffix,
                     geometry=geom,
                     kspace=kspaceW1,
                     numbands=numbands,
                     resolution=resolution,
                     mesh_size=mesh_size,
                     initcode=defaults.default_initcode,
                     postcode='',
                     runcode=runcode,
                     clear_subfolder=runmode.startswith('s')
                     or runmode.startswith('c'))

    draw_bands_title = (
        'Hex. PhC slab W1; {0}, thickness={1:0.3f}, radius={2:0.3f}'.format(
            mat.name, geom.objects[0].size[2], radius) + bands_title_appendix)

    return do_runmode(
        sim,
        runmode,
        num_processors,
        draw_bands_title,
        plot_crop_y=plot_crop_y,
        convert_field_patterns=convert_field_patterns,
        field_pattern_plot_k_selection=field_pattern_plot_k_selection,
        field_pattern_plot_filetype=defaults.field_dist_filetype,
        x_axis_hint=[5, "{1}" if ydirection else "{0}"],
        project_bands_list=project_bands_list,
        color_by_parity='y')
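A minimal usage sketch follows; it is not part of the original example. The parameter values are illustrative assumptions, and 'SiN' stands in for one of the materials the docstring says are defined in data.py.

# Hedged usage sketch (illustrative values only, not from the original source):
sim = TriHolesSlab3D_Waveguide(
    material='SiN',        # or a plain epsilon value, e.g. 4.0
    radius=0.34,           # hole radius in units of the lattice constant
    thickness=0.8,         # slab thickness in units of the lattice constant
    mode='zeven',
    numbands=8,
    k_steps=17,            # or an explicit list such as [0.3, 0.4, 0.5]
    supercell_size=5,
    supercell_z=6,
    resolution=32,
    runmode='sim',         # '' would only build and return the Simulation object
    num_processors=2,
    plot_complete_band_gap=False)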
Esempio n. 51
0
def build_bcca(num_pcles=1024, radius=0.5, overlap=None, store_aggs=False, use_stored=False, agg_path='.', constrain_dir=True):
    """
    Build a cluster-cluster agglomerate particle. This works by building two
    identical mass aggregates with m particles and allowing them to stick randomly
    to produce a 2m monomer aggregate. Two of these aggregates are then joined,
    and so on.

    Note that num_pcles must be a power of 2!

    To speed up subsequent runs, store_aggs=True will store each
    generation of aggregate to a file. If use_stored=True a random one of these
    files will be loaded. If insufficient files are available, new aggregates
    will be generated. All files are saved/loaded to/from agg_path (default=.)
    """

    import glob, os

    num_pcles = int(num_pcles)
    if not (num_pcles != 0 and ((num_pcles & (num_pcles - 1)) == 0)):
        print('ERROR: number of particles must be a power of two!')
        return None

    radius = float(radius)
    if radius <= 0:
        print('ERROR: radius must be a positive value')
        return None

    if overlap is not None:
        if (overlap<0.) or (overlap>1.):
            print('ERROR: overlap must be either None, or 0<overlap<1')
            return None

    num_gens = int(np.log2(num_pcles))

    # Generation files are stored as simple CSVs with the filename convention:
    # bcca_gen_<m>_<id>.csv
    # where <m> is the generation number (1=2 monomers, 2=4 monomers and so on)
    # and <id> is an incrementing ID (1=first file, etc.)

    # first run: generate num_pcles/2 two-monomer BPCA aggregates
    agg_list = [build_bpca(num_pcles=2, radius=radius, output=False, overlap=overlap)
                for _ in range(num_pcles // 2)]
    for agg in agg_list:
        agg.recentre()

    # loop over generations needed
    for idx, gen in enumerate(range(num_gens-1,0,-1)):

        num_aggs = 2**gen
        print('INFO: Building generation %d with %d aggregates of %d monomers' % (idx+1,num_aggs,2**(idx+1)))

        next_list = [] # the list of next generation aggregate (half as big as agg_list)
        
        for agg_idx in range(0,num_aggs,2):
            sim = Simulation(max_pcles=num_pcles)
            agg1 = agg_list[agg_idx]
            agg2 = agg_list[agg_idx+1]
            sim.add_agg(agg1)
            # TODO - calculate the optimum value instead of 10 here!
            vec = random_sphere() * max(sim.farthest() * 10.0, radius *4.)
            agg2.move(vec)

            success = False
            while not success:

                second = random_sphere() * max(agg1.farthest() * 10.0, radius *4.)

                if constrain_dir:
                    direction = (second - vec)
                else:
                    direction = second + random_sphere()

                direction = direction/np.linalg.norm(direction)
                ids, dist, hit = sim.intersect(agg2.pos, direction, closest=True)

                if hit is None:
                    continue
                else:
                    agg2.move(direction*dist)

                    # now need to shift to avoid any overlap - query the intersect between
                    # two monomers that will be colliding
                    agg2.move(hit-sim.pos[np.where(sim.id==ids)[0][0]])

                    # check if there are any overlaps in the domain
                    success = sim.check(agg2.pos, agg2.radius)
                    if not success: continue

                    # if requested, move the monomer back an amount
                    if overlap is not None:
                        agg2.move( (sim.pos[np.where(sim.id==ids)[0][0]]-hit)*(overlap) )

                    sim.add_agg(agg2)
                    sim.recentre()
                    next_list.append(sim)

                    if store_aggs:
                        # bcca_gen_<m>_<id>.csv
                        agg_files = glob.glob(os.path.join(agg_path, 'bcca_gen_%03d_*.csv' % (idx+1)))
                        id_list = [int(os.path.basename(f).split('_')[3].split('.')[0]) for f in agg_files]
                        agg_id = 1 if len(id_list) == 0 else max(id_list) + 1
                        agg_file = os.path.join(agg_path, 'bcca_gen_%03d_%03d.csv' % (idx+1, agg_id))
                        agg2.to_csv(agg_file)

        agg_list = next_list

    return next_list[0]
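A short usage sketch, not taken from the original module; the argument values and the './aggs' cache directory are assumptions for illustration.

# Build a 1024-monomer cluster-cluster aggregate of monomers with radius 0.5,
# caching each intermediate generation in ./aggs so later runs can reuse them.
agg = build_bcca(num_pcles=1024, radius=0.5, overlap=None,
                 store_aggs=True, use_stored=True, agg_path='./aggs',
                 constrain_dir=True)
if agg is not None:
    agg.to_csv('bcca_final.csv')  # to_csv is the same method used above for generation files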
Esempio n. 52
0
def main(sess):

    goal = tf.placeholder(dtype=tf.float32, shape=[batch_size, 2], name='goal')

    sim = Simulation(dt=dt,
                     num_particles=num_particles,
                     grid_res=res,
                     bc=bc,
                     gravity=gravity,
                     E=1,
                     m_p=1,
                     V_p=1,
                     sess=sess)
    position = np.zeros(shape=(batch_size, 2, num_particles))

    velocity_ph = tf.Variable([0.4, 0.05], trainable=True)
    velocity_1 = velocity_ph[None, :, None] + tf.zeros(
        shape=[batch_size, 2, group_particles], dtype=tf.float32)
    velocity_2 = tf.zeros(shape=[batch_size, 2, group_particles],
                          dtype=tf.float32)
    velocity = tf.concat([velocity_1, velocity_2], axis=2)

    for b in range(batch_size):
        for i in range(group_particles):
            x, y = 0, 0
            while (x - 0.5)**2 + (y - 0.5)**2 > 0.25:
                x, y = random.random(), random.random()
            position[b, :, i] = ((x * 2 + 3) / 30, (y * 2 + 12.75) / 30)

        for i in range(group_particles):
            x, y = 0, 0
            while (x - 0.5)**2 + (y - 0.5)**2 > 0.25:
                x, y = random.random(), random.random()
            position[b, :, i + group_particles] = ((x * 2 + 10) / 30,
                                                   (y * 2 + 12.75) / 30)

    sess.run(tf.global_variables_initializer())

    initial_state = sim.get_initial_state(position=position, velocity=velocity)

    final_position = sim.initial_state.center_of_mass(group_particles, None)
    loss = tf.reduce_sum((final_position - goal)**2)
    # loss = tf.reduce_sum(tf.abs(final_position - goal))
    sim.add_point_visualization(pos=final_position, color=(1, 0, 0), radius=3)
    sim.add_point_visualization(pos=goal, color=(0, 1, 0), radius=3)

    trainables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    sim.set_initial_state(initial_state=initial_state)

    sym = sim.gradients_sym(loss, variables=trainables)

    goal_input = np.array([[0.7, 0.3]], dtype=np.float32)

    for i in range(1000000):
        t = time.time()
        memo = sim.run(initial_state=initial_state,
                       num_steps=steps,
                       iteration_feed_dict={goal: goal_input},
                       loss=loss)
        grad = sim.eval_gradients(sym, memo)
        print('grad', grad)
        gradient_descent = [
            v.assign(v - lr * g) for v, g in zip(trainables, grad)
        ]
        sess.run(gradient_descent)
        print('iter {:5d} time {:.3f} loss {:.4f}'.format(
            i,
            time.time() - t, memo.loss))
        if i % 5 == 0:  # True: # memo.loss < 0.01:
            sim.visualize(memo)
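A hedged entry-point sketch (not part of the excerpt): it assumes TensorFlow 1.x, where tf.Session and tf.ConfigProto are available, and that module-level constants such as batch_size, dt, res and lr are defined earlier in the original script.

if __name__ == '__main__':
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # do not reserve all GPU memory up front
    with tf.Session(config=config) as sess:
        main(sess)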
Esempio n. 53
0
class Canvas(app.Canvas):
    step = 0

    def __init__(self, configuration_file, output_file, offline):
        app.Canvas.__init__(self, title='Molecular viewer', keys='interactive')

        if offline:
            self.simulation = None
            self.load_coordinates_from_file(output_file)
        else:
            self.simulation = Simulation(configuration_file,
                                         output_filename=output_file)
            self.simulation.run(s_d=2)

        self.size = 1200, 800

        self.program = gloo.Program(vertex, fragment)
        self.view = np.eye(4, dtype=np.float32)
        self.model = np.eye(4, dtype=np.float32)
        self.projection = np.eye(4, dtype=np.float32)
        self.translate = 20
        translate(self.view, 0, 0, -self.translate)

        self.load_molecules_from_simulation(self.get_coordinates())
        self.load_data()

        self.theta = 0
        self.phi = 0

        self._timer = app.Timer('auto', connect=self.on_timer, start=True)

    def load_coordinates_from_file(self, filename):
        coordinate_frames = []
        frame = []
        with open(filename) as f:
            for line in f:
                if line == '\n':
                    if frame:
                        coordinate_frames.append(frame)
                        frame = []
                else:
                    coordinate = [float(x) for x in line.split()]
                    frame.append(coordinate)

        self.offline_atoms = coordinate_frames

    def get_coordinates(self):
        if self.simulation:
            self.simulation.step()
            return self.simulation.atoms
        else:
            coordinates = self.offline_atoms[self.step]
            self.step += 1
            return coordinates

    def load_molecules_from_simulation(self, atom_coordinates):
        self._nAtoms = len(atom_coordinates)

        # The x, y, z values, stored in one array
        self.coords = atom_coordinates

        # The array that will store the color and alpha scale for all the atoms
        self.atomsColours = np.array([[0, 1, 0]] * self._nAtoms)

        # The array that will store the scale for all the atoms.
        self.atomsScales = np.array([0.3] * self._nAtoms)

    def load_data(self):
        n = self._nAtoms

        data = np.zeros(n, [('a_position', np.float32, 3),
                            ('a_color', np.float32, 3),
                            ('a_radius', np.float32, 1)])

        data['a_position'] = self.coords
        data['a_color'] = self.atomsColours
        data['a_radius'] = self.atomsScales

        self.program.bind(gloo.VertexBuffer(data))

        self.program['u_model'] = self.model
        self.program['u_view'] = self.view
        self.program['u_light_position'] = 0., 0., 2.
        self.program['u_light_spec_position'] = -5., 5., -5.

    def on_initialize(self, event):
        gloo.set_state(depth_test=True, clear_color='black')

    def on_key_press(self, event):
        if event.text == ' ':
            if self._timer.running:
                self._timer.stop()
            else:
                self._timer.start()

    def on_timer(self, event):
        self.theta += 0.0
        self.phi += 0.0
        self.model = np.eye(4, dtype=np.float32)

        rotate(self.model, self.theta, 0, 0, 1)
        rotate(self.model, self.phi, 0, 1, 0)

        self.coords = self.get_coordinates()
        self.load_data()
        self.program['u_model'] = self.model
        self.update()

    def on_resize(self, event):
        width, height = event.size
        gloo.set_viewport(0, 0, width, height)
        self.projection = perspective(25.0, width / float(height), 2.0, 100.0)
        self.program['u_projection'] = self.projection

    def on_mouse_wheel(self, event):
        self.translate -= event.delta[1]
        self.translate = max(-1, self.translate)
        self.view = np.eye(4, dtype=np.float32)

        translate(self.view, 0, 0, -self.translate)

        self.program['u_view'] = self.view
        self.update()

    def on_draw(self, event):
        gloo.clear()
        self.program.draw('points')
Esempio n. 54
0
                    help='Toggle persistent walking',
                    action='store_true')
parser.add_argument('-dw',
                    '--directed_walking',
                    default=False,
                    help='Toggle directed walking',
                    action='store_true')
parser.add_argument('-b',
                    '--block_limit',
                    default=None,
                    help='The number of blocks to be used for the simulation,'
                    ' starting from the oldest blocks',
                    type=int)
parser.add_argument(
    '-a',
    '--alpha',
    default=0.1,
    help='The alpha factor used for the directed walking algorithm',
    type=float)
parser.add_argument(
    '-tp',
    '--teleport_probability',
    default=0.5,
    help='The teleport probability used in the persistent walking algorithm',
    type=float)
args = parser.parse_args()

Simulation(args.time, args.dir, args.verbose, args.persistent_walking,
           args.directed_walking, args.block_limit, args.alpha,
           args.teleport_probability).start()
Esempio n. 55
0
# Read the guide star catalog

ra, dec, V = np.loadtxt(starCatalog, unpack=True)
NguideStars = len(ra)

# For each guide star, center a subfield around it, and run the simulator

for n in range(NguideStars):

    print("Running the simulator for guide star {0}".format(n))
    print("Guide Star Coordinates [deg]: {}, {}".format(ra[n], dec[n]))

    # Set up a Simulation object

    sim = Simulation(outputFilePrefix + "{0:02d}".format(n), inputFile)
    sim.outputDir = outputDir

    # Make sure it uses the right starCatalog, jitter file, and PSF file

    sim["ObservingParameters/StarCatalogFile"] = starCatalog
    sim["Platform/JitterFileName"] = jitterFile
    sim["PSF/FromFile/Filename"] = psfFile

    # Center the subfield around the current guide star
    # First extract the required information from the yaml input file.
    # Note that for this simulation, we want to use the fast cams, not the nominal ones.
    #
    # This function sets the following configuration parameters:
    #
    # Camera/FieldDistortion/IncludeFieldDistortion = True
Esempio n. 56
0
def TriHolesSlab3D(material,
                   radius,
                   thickness,
                   numbands=8,
                   k_interpolation=11,
                   resolution=32,
                   mesh_size=7,
                   supercell_z=6,
                   runmode='sim',
                   num_processors=2,
                   save_field_patterns=True,
                   convert_field_patterns=True,
                   containing_folder='./',
                   job_name_suffix='',
                   bands_title_appendix='',
                   custom_k_space=None,
                   modes=('zeven', 'zodd'),
                   substrate_material=None):
    """Create a 3D MPB Simulation of a slab with a triangular lattice of
    holes.

    :param material: can be a string (e.g. SiN,
    4H-SiC-anisotropic_c_in_z; defined in data.py) or just the epsilon
    value (float)
    :param radius: the radius of holes in units of the lattice constant
    :param thickness: slab thickness in units of the lattice constant
    :param numbands: number of bands to calculate
    :param k_interpolation: number of k-vectors interpolated between
    every two of the used high-symmetry points Gamma, M, K and Gamma
    again, so the total number of simulated k-vectors will be
    3*k_interpolation + 4
    :param resolution: described in MPB documentation
    :param mesh_size: described in MPB documentation
    :param supercell_z: the height of the supercell in units of the
    lattice constant
    :param runmode: can be one of the following:
        ''       : just create and return the simulation object
        'ctl'    : create the sim object and save the ctl file
        'sim' (default): run the simulation and do all postprocessing
        'postpc' : do all postprocessing; simulation should have run
                   before!
        'display': display all pngs done during postprocessing. This is
                   the only mode that is interactive.
    :param num_processors: number of processors used during simulation
    :param save_field_patterns: indicates whether field pattern h5 files
    are generated during the simulation (at points of high symmetry)
    :param convert_field_patterns: indicates whether field pattern h5
    files should be converted to png (only when postprocessing)
    :param containing_folder: the path to the folder which will contain
    the simulation subfolder.
    :param job_name_suffix: Optionally specify a job_name_suffix
    (appendix to the folder name etc.) which will be appended to the
    jobname created automatically from the most important parameters.
    :param bands_title_appendix: will be added to the title of the bands
    diagram.
    :param custom_k_space: by default, a KSpaceTriangular with
    k_interpolation interpolation steps is used. Provide any KSpace
    object here to customize this; k_interpolation will then be ignored.
    :param modes: a list of modes to run. Possible are 'zeven', 'zodd'
    or '' (the latter meaning no distinction). Default: ['zeven', 'zodd']
    :param substrate_material: the material of an optional substrate,
    see param material. Holes will not be extended into the substrate.
    Default: None, i.e. the substrate is air.
    :return: the Simulation object

    """
    mat = Dielectric(material)

    geom = Geometry(
        width=1,
        height=1,
        depth=supercell_z,
        triangular=True,
        objects=[
            Block(
                x=0,
                y=0,
                z=0,
                material=mat,
                #make it bigger than computational cell, just in case:
                size=(2, 2, thickness)),
            Rod(x=0, y=0, material='air', radius=radius)
        ])

    if substrate_material:
        geom.add_substrate(Dielectric(substrate_material),
                           start_at=-0.5 * thickness)

    if isinstance(custom_k_space, KSpace):
        kspace = custom_k_space
    else:
        kspace = KSpaceTriangular(k_interpolation=k_interpolation,
                                  use_uniform_interpolation=defaults.newmpb)

    # points of interest: (output mode patterns at these points)
    if save_field_patterns:
        poi = kspace.points()[0:-1]
    else:
        poi = []

    runcode = ''
    for mode in modes:
        if mode == '':
            runcode += ('(run %s)\n' % (defaults.default_band_func(
                poi, ' '.join(defaults.output_funcs_other))) +
                        '(print-dos 0 1.2 121)\n\n')
        else:
            if mode == 'zeven':
                outputfunc = ' '.join(defaults.output_funcs_te)
            else:
                outputfunc = ' '.join(defaults.output_funcs_tm)
            runcode += ('(run-%s %s)\n' %
                        (mode, defaults.default_band_func(poi, outputfunc)) +
                        '(print-dos 0 1.2 121)\n\n')

    jobname = 'TriHolesSlab_{0}_r{1:03.0f}_t{2:03.0f}'.format(
        mat.name, radius * 1000, thickness * 1000)

    sim = Simulation(jobname=jobname + job_name_suffix,
                     geometry=geom,
                     kspace=kspace,
                     numbands=numbands,
                     resolution=resolution,
                     mesh_size=mesh_size,
                     initcode=defaults.default_initcode,
                     postcode='',
                     runcode=runcode,
                     work_in_subfolder=path.join(containing_folder,
                                                 jobname + job_name_suffix),
                     clear_subfolder=runmode.startswith('s')
                     or runmode.startswith('c'))

    draw_bands_title = (
        'Hex. PhC slab; '
        '{0}, thickness={1:0.3f}, radius={2:0.3f}'.format(
            mat.name, geom.objects[0].size[2], geom.objects[1].radius) +
        bands_title_appendix)
    return do_runmode(sim,
                      runmode,
                      num_processors,
                      draw_bands_title,
                      plot_crop_y=0.8 / geom.substrate_index,
                      convert_field_patterns=convert_field_patterns,
                      field_pattern_plot_filetype=defaults.field_dist_filetype,
                      field_pattern_plot_k_selection=None,
                      x_axis_hint=[defaults.default_x_axis_hint,
                                   kspace][kspace.has_labels()])
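A minimal usage sketch, again with assumed parameter values; 'SiN' refers to a material the docstring says is defined in data.py.

# Hedged example call: z-even and z-odd bands of a SiN slab on the default
# triangular k-space (3*11 + 4 = 37 k-vectors).
sim = TriHolesSlab3D(
    material='SiN',
    radius=0.34,
    thickness=0.8,
    numbands=8,
    k_interpolation=11,
    resolution=32,
    supercell_z=6,
    runmode='sim',
    num_processors=2,
    modes=('zeven', 'zodd'))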
Esempio n. 57
0
class SceneWindow(QMainWindow):

    '''
    Window for presenting the Canvas in real time. Shows only one view.
    To be implemented -- stereo views.
    '''
    pixmapChanged = QtCore.pyqtSignal(int)

    def __init__(self):

        super().__init__()

        '''Fields'''

        self.simLimit = None
        self.simulationNo = None
        self.elementCount = None
        self.simLength = 0
        self.simulation = None
        self.frameList = []

        self.timer = QtCore.QElapsedTimer()



        ''' Window Properties'''

        self.Icon = QtGui.QIcon(str(ICON))
        self.setMinimumSize(self.sizeHint())
        self.resize(1600, 800)
        self.setWindowTitle('Vispy 3D')
        self.setWindowIcon(self.Icon)
    
        self.setMenuBar(DefaultMenuBar(self))

        ''' Setting window layout and central widget '''
        self.centralwidget = QtWidgets.QWidget()
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setAlignment(QtCore.Qt.AlignCenter)


        ''' Frames'''
        self.canvasFrame = QGroupBox("Canvas Frame")
        self.controlFrame = QGroupBox("Control Frame")
        self.renderFrame = QGroupBox()

        self.canvasFrameLayout  = QVBoxLayout(self.canvasFrame)
        self.controlFrameLayout = QGridLayout(self.controlFrame)
        self.renderFrameLayout = QHBoxLayout()

        self.canvasWidget = QWidget()
        self.canvasWidget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.canvasWidgetLayout = QHBoxLayout()

        ''' Rendered Video of Scene'''
        self.canvasHolder = QWidget() #QtWidgets.QGraphicsView()#
        self.canvasHolder.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.canvasHolderLayout = QVBoxLayout()
        self.canvasHolder.setLayout(self.canvasHolderLayout)

        ''' 'Image' Video of scene'''
        self.twoVideoWidget = QWidget()
        self.twoVideoWidget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.twoVideoWidgetLayout = QVBoxLayout()
        


        self.video1Widget = QtWidgets.QGraphicsView()
        self.video2Widget = QtWidgets.QGraphicsView()

        self.twoVideoWidgetLayout.addWidget(self.video1Widget)
        self.twoVideoWidgetLayout.addWidget(self.video2Widget)
        
        self.twoVideoWidget.setLayout(self.twoVideoWidgetLayout)

        '''Simulation parameters'''
        self.elementCount = QLabel("Elements")
        self.elementCount.setMaximumSize(150,50)
        self.elementCountBox = QLineEdit()
        self.elementCountBox.setMaximumSize(350,50)
        self.elementCountBox.setPlaceholderText('element count')
        self.elementCountBox.setText('10')


        self.setFunctionLabel = QLabel("Set Function")
        self.setFunctionLabel.setMaximumSize(150,50)
        self.setFunctionBox = QLineEdit()
        self.setFunctionBox.setMaximumSize(350,50)
        self.setFunctionBox.setPlaceholderText("input Function in Cartesian Co-ordSinates")

        self.zoomLevel = QPushButton("Zoom")
        self.zoomLevel.setMaximumSize(150,50)
        self.zoomInput = QLineEdit()
        self.zoomLevel.clicked.connect(self.setZoom)
        self.zoomInput.setPlaceholderText('Adjust zoom for camera - default scale is 10.')
        self.zoomInput.setMaximumSize(350,50)

        self.simulationLengthLabel = QLabel('Simulation Length')
        self.simulationLengthLabel.setMaximumSize(150,50)
        self.simulationLengthBox = QLineEdit()
        self.simulationLengthBox.setMaximumSize(350,50)
        self.simulationLengthBox.setPlaceholderText('No. of Frames - default is 100')
        self.simulationLengthBox.setText('500')

        self.simulationsLabel = QLabel('Simulations')
        self.simulationsLabel.setMaximumSize(150, 50)
        self.simulationsBox = QLineEdit()
        self.simulationsBox.setMaximumSize(350,50)
        self.simulationsBox.setPlaceholderText('No. of simulations')

        ''' Render Button'''
        self.renderButton = QPushButton('Render')
        self.renderButton.clicked.connect(self.handleRender)
        self.renderButton.setMaximumSize(250,50)
        # self.renderButton.setAlignment(QtCore.Qt.AlignCenter)

        self.controlFrameLayout.addWidget(self.elementCount, 0, 0 )
        self.controlFrameLayout.addWidget(self.elementCountBox, 0, 1)

        self.controlFrameLayout.addWidget(self.zoomLevel, 0, 2)
        self.controlFrameLayout.addWidget(self.zoomInput, 0, 3)

        self.controlFrameLayout.addWidget(self.setFunctionLabel,0, 4)
        self.controlFrameLayout.addWidget(self.setFunctionBox,0, 5)
        self.controlFrameLayout.addWidget(self.simulationLengthLabel, 0, 6)
        self.controlFrameLayout.addWidget(self.simulationLengthBox, 0, 7)
        self.controlFrameLayout.addWidget(self.simulationsLabel, 0, 8)
        self.controlFrameLayout.addWidget(self.simulationsBox, 0, 9)

        self.renderFrameLayout.addWidget(self.renderButton)
        # self.controlFrameLayout.setAlignment(self.canvasWidget, QtCore.Qt.AlignCenter)
        # self.canvasFrameLayout.addWidget(self.canvasWidget)
        
        self.renderFrame.setLayout(self.renderFrameLayout)
        self.canvasWidget.setLayout(self.canvasWidgetLayout)
        self.canvasFrameLayout.addWidget(self.canvasWidget)
        
        # self.canvasFrame.setLayout(self.canvasFrameLayout)

        self.verticalLayout.addWidget(self.canvasFrame)
        self.verticalLayout.addWidget(self.controlFrame)
        self.verticalLayout.addWidget(self.renderFrame)

        self.setCentralWidget(self.centralwidget)

    def setZoom(self):

        zoomFactor = self.zoomInput.text()

        try:
            zoomFactor = int(zoomFactor)
            if zoomFactor > 100 or zoomFactor  == 0:
                self.error = ErrorWindow("element count can`t be greater than 100 or equal to 0", self.Icon)
                self.error.show()
                return

        except Exception as e:

            self.error = ErrorWindow("Element count has to be an Integer", self.Icon)
            self.error.show()
            return
        self.simulation.view.camera.scale_factor = zoomFactor

    def handleRender(self):

        n = self.elementCountBox.text()
        simulationLength = self.simulationLengthBox.text()
        simulations = self.simulationsBox.text()

        if simulationLength:

            try:
                simulationLength = int(simulationLength)
                self.simLimit = simulationLength
            except Exception as e:

                self.simLimit = 100
                self.error = ErrorWindow("Simulation length has to be an integer; the default of 100 has been used", self.Icon)
                self.error.show()

        else: self.simLimit = 100
        
        if simulations:

            try:
                simulations = int(simulations)
                self.simulationNo = simulations
            except Exception as e:
                self.simulationNo = 1
                self.error = ErrorWindow("No. of simulations has to be an integer - only one simulation will be run", self.Icon)
                self.error.show()

        else: self.simulationNo = 1
                
        try:

            n = int(n)
            if n > 100:
                self.error = ErrorWindow("element count can`t be greater than 100", self.Icon)
                self.error.show()

                return

            else: self.elementCount = n

        except Exception as e:

            print(e)

            self.error = ErrorWindow("Element count has to be an Integer", self.Icon)
            self.error.show()

            return

        self.create_simulation(self.elementCount, 1)




    def create_simulation(self, elementCount, sim_index):

        
        self.th = QtCore.QThread()

        self.simulation = Simulation(elementCount, sim_index)
        self.simulation.moveToThread(self.th)
        self.simulation.changePixmap.connect(self.update)
        self.pixmapChanged.connect(self.simulation.motionControl)

        self.th.start()

        self.simulation.canvas.create_native()
        self.native = self.simulation.canvas.native
        self.canvasHolderLayout.addWidget(self.native)

        # print(self.simulation.canvas.size)
        # self.canvasHolderLayout.addWidget(self.simulation.canvas.native)
        self.canvasWidgetLayout.addWidget(self.canvasHolder)
        self.canvasWidgetLayout.addWidget(self.twoVideoWidget)

    

        self.pixmapChanged.emit(100)



    def setImage(self):

        'Return camera to neutral x position after rendering the two images'

        self.simulation.view.camera.scale_factor = 10

        self.simulation.canvas.size = 800,600

        self.simulation.view.camera.elevation = 20
        self.simulation.view.camera.azimuth = 85
        
        imageArray = self.simulation.canvas.render()
        rgbImage = cv2.cvtColor(imageArray, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(rgbImage.data, imageArray.shape[1], imageArray.shape[0], QtGui.QImage.Format_RGB888)
        

        scene1 = QtWidgets.QGraphicsScene()
        pixmapItem = QtWidgets.QGraphicsPixmapItem(QtGui.QPixmap.fromImage(image))
        scene1.addItem(pixmapItem)


        self.simulation.view.camera.elevation = 20
        self.simulation.view.camera.azimuth = 95

        imageArray = self.simulation.canvas.render()
        rgbImage = cv2.cvtColor(imageArray, cv2.COLOR_BGR2RGB)
        image = QtGui.QImage(rgbImage.data, imageArray.shape[1], imageArray.shape[0], QtGui.QImage.Format_RGB888)
        

        scene2 = QtWidgets.QGraphicsScene()
        pixmapItem = QtWidgets.QGraphicsPixmapItem(QtGui.QPixmap.fromImage(image))
        scene2.addItem(pixmapItem)

        self.video1Widget.setScene(scene1)
        self.video2Widget.setScene(scene2)

        'Return camera to neutral x position after rendering the two images'
        self.simulation.view.camera.elevation = 20
        self.simulation.view.camera.azimuth = 90

        self.simulation.canvas.render()
    
        np.set_printoptions(threshold=np.inf)

        self.simulation.frameList.append(FrameData(self.simulation.canvas.render(), self.simulation.elementList, self.simulation.view.camera.scale_factor, (800, 600), len(self.frameList)+1))
 

    def update(self):
        
        self.simLength += 1
        self.timer.start()
        self.setImage()
        dt = self.timer.elapsed()
        self.timer.restart()

        self.pixmapChanged.emit(dt)

        if self.simLength == self.simLimit:

            self.pixmapChanged.disconnect()
            self.simulation.changePixmap.disconnect()

            currSim = self.simulation.index
            self.writeData()

            if currSim < self.simulationNo: 

                self.th.exit()
                while not self.th.isFinished(): time.sleep(0.5)

                self.simLength = 0  
                self.canvasHolderLayout.removeWidget(self.native)
                self.create_simulation(self.elementCount, currSim + 1)
            
            else: self.close()

    def writeData(self):
        
        index = 0
        
        if not Path(r'..\..\logs\simulations\originalImages\simulation_{0}'.format(self.simulation.index)).is_dir(): os.makedirs(r'..\..\logs\simulations\originalImages\simulation_{0}'.format(self.simulation.index))
        if not Path(r'..\..\logs\simulations\velocityImages\simulation_{0}'.format(self.simulation.index)).is_dir(): os.makedirs(r'..\..\logs\simulations\velocityImages\simulation_{0}'.format(self.simulation.index))
        if not Path(r'..\..\logs\simulations\depthMatrix\simulation_{0}'.format(self.simulation.index)).is_dir(): os.makedirs(r'..\..\logs\simulations\depthMatrix\simulation_{0}'.format(self.simulation.index))
        

        for frameData in self.simulation.frameList:

            frameData.createMatrix()
            frameData.createMatrix(event='depth')

            filenameO = r'..\..\logs\simulations\originalImages\simulation_{0}\frame{1}.jpg'.format(self.simulation.index, index)
            filenameV = r'..\..\logs\simulations\velocityImages\simulation_{0}\frame{1}.jpg'.format(self.simulation.index, index)
            filenameD = r'..\..\logs\simulations\depthMatrix\simulation_{0}\frame{1}.jpg'.format(self.simulation.index, index)
            cv2.imwrite(filenameO, frameData.imageArray)
            cv2.imwrite(filenameV, frameData.velocityMatrix)
            cv2.imwrite(filenameD, frameData.depthMatrix)

            index  += 1
Esempio n. 58
0
from collections import OrderedDict
 
from simulation import Simulation
from evaluation.economics import Economics
from evaluation.performance import Performance
from evaluation.graphics import Graphics

#%% Define simulation settings

# Simulation timestep in seconds
timestep = 60*60
# Number of simulation timesteps (24*365 = one year of hourly steps)
simulation_steps = 24*365

#%% Create Simulation instance
sim = Simulation(simulation_steps=simulation_steps,
                 timestep=timestep)

#%% load hourly data
#Load timeseries irradiation data
sim.env.meteo_irradiation.read_csv(file_name='data/env/SoDa_Cams_Radiation_h_2006-2016_Arusha.csv',
                                   start=0, 
                                   end=simulation_steps)
#Load weather data
sim.env.meteo_weather.read_csv(file_name='data/env/SoDa_MERRA2_Weather_h_2006-2016_Arusha.csv', 
                               start=0, 
                               end=simulation_steps)

#Load load demand data
sim.load.load_demand.read_csv(file_name='data/load/load_dummy_h.csv', 
                              start=24, 
                              end=48)
Esempio n. 59
0
class Test_Exact_Learning_Algorithms(unittest.TestCase):
    """Verifies that BPTT algorithms gives same aggregate weight change as
    RTRL for a very small learning rate, while also checking that the
    recurrent weights did change some amount (i.e. learning rate not *too*
    small that this is trivially true)."""
    @classmethod
    def setUpClass(cls):

        cls.task = Add_Task(4, 6, deterministic=True, tau_task=1)
        cls.data = cls.task.gen_data(400, 0)

        n_in = cls.task.n_in
        n_h = 16
        n_out = cls.task.n_out

        cls.W_in = np.random.normal(0, np.sqrt(1 / (n_in)), (n_h, n_in))
        M_rand = np.random.normal(0, 1, (n_h, n_h))
        cls.W_rec = np.linalg.qr(M_rand)[0]
        cls.W_out = np.random.normal(0, np.sqrt(1 / (n_h)), (n_out, n_h))
        cls.W_FB = np.random.normal(0, np.sqrt(1 / n_out), (n_out, n_h))

        cls.b_rec = np.zeros(n_h)
        cls.b_out = np.zeros(n_out)

    def test_small_lr_case(self):

        alpha = 1

        self.rnn_1 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_2 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        self.rnn_3 = RNN(self.W_in,
                         self.W_rec,
                         self.W_out,
                         self.b_rec,
                         self.b_out,
                         activation=tanh,
                         alpha=alpha,
                         output=softmax,
                         loss=softmax_cross_entropy)

        lr = 0.00001
        self.optimizer_1 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_1 = RTRL(self.rnn_1)
        self.optimizer_2 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_2 = Future_BPTT(self.rnn_2, 25)
        self.optimizer_3 = Stochastic_Gradient_Descent(lr=lr)
        self.learn_alg_3 = Efficient_BPTT(self.rnn_3, 100)

        monitors = []

        np.random.seed(1)
        self.sim_1 = Simulation(self.rnn_1)
        self.sim_1.run(self.data,
                       learn_alg=self.learn_alg_1,
                       optimizer=self.optimizer_1,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(1)
        self.sim_2 = Simulation(self.rnn_2)
        self.sim_2.run(self.data,
                       learn_alg=self.learn_alg_2,
                       optimizer=self.optimizer_2,
                       monitors=monitors,
                       verbose=False)

        np.random.seed(1)
        self.sim_3 = Simulation(self.rnn_3)
        self.sim_3.run(self.data,
                       learn_alg=self.learn_alg_3,
                       optimizer=self.optimizer_3,
                       monitors=monitors,
                       verbose=False)

        #Assert networks learned similar weights with a small tolerance.
        assert_allclose(self.rnn_1.W_rec, self.rnn_2.W_rec, atol=1e-4)
        assert_allclose(self.rnn_2.W_rec, self.rnn_3.W_rec, atol=1e-4)
        #But that there was some difference from initialization
        self.assertFalse(
            np.isclose(self.rnn_1.W_rec, self.W_rec, atol=1e-4).all())
Esempio n. 60
0
no_batches = 0
fd = open("./output.txt", 'w+')


def percentile(arr):  ##function to calculate the 95th percentile
    k = (len(arr) - 1) * 0.95
    f = math.floor(k)
    c = math.ceil(k)
    if f == c:
        return arr[int(k)]
    x0 = arr[int(f)] * (c - k)
    x1 = arr[int(c)] * (k - f)
    return x0 + x1
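# Illustrative check (not in the original script): for a sorted list of ten
# values, k = (10 - 1) * 0.95 = 8.55, so the result interpolates between
# arr[8] and arr[9] as 0.45*arr[8] + 0.55*arr[9], matching
# numpy.percentile(arr, 95) with linear interpolation, e.g.
#     percentile(sorted(range(1, 11)))  ->  0.45*9 + 0.55*10 = 9.55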


simul_obj = Simulation(mc, cla, buffer, cls, clr)  ## Calling Simulation
while no_batches <= max_batches:
    fd.write("Batch No:" + str(no_batches) + "\n")
    fd.write("Requestid" + "				" + "T" + "									" + "D" + "\n")
    while i <= max_obsv:
        id, T, D = simul_obj.simulation(CLA, CLS, CLR, b_size, seed, i)
        if T != 0:
            fd.write(str(id) + "				" + str(T) + "									" + str(D) + "\n")
            arr_T.append(T)
            arr_D.append(D)
        i += 1
    ##calculate mean for T and D at the end of each batch
    arr_T = sorted(arr_T)
    arr_D = sorted(arr_D)
    ##calculating mean and 95th percentile for each batch
    mean_T.append(sum(arr_T) / len(arr_T))