Example #1
    def __init__(self, arg):
        self.arg = arg
        self.simMode = arg.get('simMode', 1)
        self.config = arg.get('config', None)
        self.breakpoint = arg.get('breakpoint', None)
        self.plot = arg.get('plot', None)
        self.log = logging.getLogger('trader')

        self.tick_btce = api_btce.publicapi()
        self.tick_stamp = api_stamp.publicapi()

        if self.simMode != 0:
            self.tapi_btce = api_btce.tradeapi(self.config.apikey_btce,
                                               self.config.apisecret_btce)
            self.tapi_stamp = api_stamp.tradeapi(self.config.apikey_stamp,
                                                 self.config.apisecret_stamp,
                                                 self.config.apiid_stamp)

            self.hr_btce = historyRecorder(self.arg, "btce")
            self.hr_stamp = historyRecorder(self.arg, "stamp")
        else:
            self.hr_btce = simulator.simHistoryRecorder(self.arg, "btce")
            self.hr_stamp = simulator.simHistoryRecorder(self.arg, "stamp")
            self.sim_btce = simulator.simulator("btce", {
                "usd": 0,
                "btc": 1
            }, self.arg, self.hr_btce, 0.002)
            self.sim_stamp = simulator.simulator("stamp", {
                "usd": 500,
                "btc": 0
            }, self.arg, self.hr_stamp, 0.005)
Example #2
def make_random(k):
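    # Generate a random vertex and direction, collect the track's points and
    # deposits, project the event into a box geometry, and write the truth
    # (vertex, theta, phi) to a ROOT output file.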
    vtx = random_vertex()
    theta, phi = random_direction()
    direct = unit_vector(theta, phi)
    pts, deps = track_points(vtx, direct)
    print(len(deps))
    x, y, z = unzip(pts)
    scatter3d_module(x, y, z, deps, k)

    zero = geo.point_3d(0., 0., 0.)
    #box = geo.box(714*cm, 300*cm, 510*cm, zero, "ar_cube")
    module_box = geo.box(100.0 * cm, 300.0 * cm, 100 * cm, zero,
                         "rand" + str(k) + "_box")
    geometry = geo.geometry([module_box], zero)
    simul = sim.simulator(geometry)
    simul.supervisor._outfile = OUT_DIR + "rand_event" + str(k) + ".root"

    geom = simul.project_event(pts, deps, vtx)
    simul.set_minimal()
    simul.supervisor._truth.SetBinContent(1, vtx[0])
    simul.supervisor._truth.SetBinContent(2, vtx[1])
    simul.supervisor._truth.SetBinContent(3, vtx[2])
    simul.supervisor._truth.SetBinContent(4, theta)
    simul.supervisor._truth.SetBinContent(5, phi)
    simul.supervisor.write()
Example #3
    def learningTrial(self):
        for _ in range(self.N):
            episode = []
            cstate = self.initialState()
            sim = simulator(self.mdp, currentS=cstate, terminalS=self.ts)
            T = 0
            visited = set()
            while True:
                a = self.exploreOrExploitPolicy(cstate)
                res = sim.takeAction(a)
                #print(res)
                if res == "end":
                    #                   print(111)
                    break
                r, _ = res
                episode.append((cstate, a, r))
                _, cstate = res
                T += 1

#          print(episode)
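            # First-visit Monte Carlo update: for the first occurrence of each
            # (state, action) pair in the episode, accumulate the discounted
            # return and average it into q.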
            for t in range(T):
                cstate, a, _ = episode[t]
                if (cstate, a) in visited:
                    continue
                visited.add((cstate, a))
                gt = 0
                for tt in range(t, T):
                    gt += episode[tt][2] * (self.beta**(tt - t))
                self.totalq[cstate][a] += gt
                self.countq[cstate][a] += 1
                self.q[cstate][
                    a] = self.totalq[cstate][a] / self.countq[cstate][a]
Example #4
File: MCTS.py Project: Micanga/MultiAgents
def create_temp_simulator(items, agents, main_agent):

    local_map = []
    row = [0] * 10

    for i in range(10):
        local_map.append(list(row))

    local_items = []
    for i in range(len(items)):
        (item_x, item_y) = items[i].get_position()
        local_item = item.item(item_x, item_y, items[i].level, i)
        local_item.loaded = items[i].loaded
        local_items.append(local_item)
        if not local_item.loaded:
            local_map[item_y][item_x] = 1

    local_agents = list()

    (a_agent_x, a_agent_y) = agents[0].get_position()
    local_map[a_agent_y][a_agent_x] = 8
    local_agent = agent.Agent(a_agent_x, a_agent_y, 'l1', 0)
    local_agents.append(local_agent)

    (m_agent_x, m_agent_y) = main_agent.get_position()
    local_map[m_agent_y][m_agent_x] = 9
    local_main_agent = agent.Agent(m_agent_x, m_agent_y, 'l1', 1)
    local_main_agent.set_level(main_agent.level)

    tmp_sim = simulator.simulator(local_map, local_items, local_agents,
                                  local_main_agent, 10, 10)
    return tmp_sim
Example #5
def gen_ckpt(netname):
    model = ''

    sys.path.append("input")
    if 'vgg' in netname:
        from vggnet import vgg
        model = vgg(num_classes=1000)
    elif 'resnet18' in netname:
        from resnet import resnet18
        model = resnet18()
    elif 'resnet34' in netname:
        from resnet import resnet34
        model = resnet34()
    elif 'resnet50' in netname:
        from resnet import resnet50
        model = resnet50()
    elif 'simulator' in netname:
        from simulator import simulator
        model = simulator()

    import torch
    for m in model.modules():
        if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
            m.register_buffer(f'scale', torch.tensor(1.0))
    torch.save({
        'state_dict': model.state_dict(),
        'hyper_parameters': {
            'conv.scale' : 0.0,
            'act_quant_bit' : 8,
            'weight_quant_bit' : 8,
         },
    }, f'input/{netname}.ckpt')

    sys.path.append("..")
Example #6
def generate_ppm_file(image, pi_estimator, nb_points_par_image, i, decimale):
    '''
    Generate an image in PPM format. This function calls the simulation
    function, which generates points at random. Each estimate collected is
    passed to the add_new_pi_estimate method so that the running value
    converges towards pi. The value is then written onto the generated image,
    which is saved in the folder named out.
    '''
    # 1 - Obtain the estimate of pi
    list_blue, list_pink = [], []
    pi_estime = simulator.simulator(nb_points_par_image, list_blue, list_pink)

    # 2 - Color the image pixels according to their distance from the center.
    color_image_with_points(image, list_blue, list_pink)

    if ARGUMENTS.p:
        pourcentage = validation_points(image, i, nb_points_par_image)
        print(f'Image {i+1} : {int(pourcentage)} %')

    # 3 - Add the new estimate to update the global average
    pi_val = pi_estimator.add_new_pi_estimate(pi_estime)

    # 4 - Display the pi estimate on the image
    # A full copy of the image is made so that pi can be written on it
    # before saving it in PPM format.
    copie_image = copy.deepcopy(image)
    nb_decimal = f".{decimale}f"
    write_pi_on_image(copie_image, pi_val, nb_decimal)

    # 5 - Save the image to disk
    partie_decimale = int((pi_val - int(pi_val)) * (10**decimale))
    imageio.imwrite(f"out/img{i}_{int(pi_val)}-{partie_decimale}.ppm",
                    copie_image)
Example #7
def launch():
  t = time.time()
  network = q_network.DeepQLearner(input_width, input_height, num_actions,
                                         phi_length,
                                         discount,
                                         learning_rate,
                                         rms_decay,
                                         rms_epsilon,
                                         momentum,
                                         clip_delta,
                                         freeze_interval,
                                         batch_size,
                                         network_type,
                                         update_rule,
                                         batch_accumulator,
                                         rng)
  print('compile network .. done', time.time() - t)
  agt = agent.NeuralAgent(network,
                                  epsilon_start,
                                  epsilon_min,
                                  epsilon_decay,
                                  replay_memory_size,
                                  experiment_prefix,
                                  replay_start_size,
                                  update_frequency,
                                  rng)

  print('create agent & simulator .. done')
  env = simulator.simulator()
  exp = experiment(agt,env)
  exp.run_episode(50)
Example #8
def generate_data_labels(img_size, x0, y0, SNR, N):
    prev_time = time.time()

    sim = simulator.simulator(img_size, 1)

    data_tr_for_different_snr = []
    labels_tr_for_different_snr = []
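    # For each SNR value, build N samples: with probability 0.5 simulate data
    # with an atom present (label 1), otherwise background only via
    # no_atom=True (label 0).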
    for snr in SNR:
        data_tr = []
        labels_tr = []
        for i in range(N):
            r = np.random.rand()
            if r > 0.5:
                labels_tr.append(1)

                data = sim.create_simulation_from_SNR(x0, y0, snr)
                data_tr.append(data)
            else:
                labels_tr.append(0)
                data = sim.create_simulation_from_SNR(x0,
                                                      y0,
                                                      snr,
                                                      no_atom=True)
                data_tr.append(data)
        data_tr_for_different_snr.append(np.array(data_tr))
        labels_tr_for_different_snr.append(np.array(labels_tr))

    print(f"time used: {time.time() - prev_time}")
    return np.array(data_tr_for_different_snr), np.array(
        labels_tr_for_different_snr)
Example #9
def update_models2(belief, state, expert_models, p, num_models, state_rewards):
    _rewards = np.zeros(num_models)
    next_state = np.zeros(num_models)
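    # Query every expert model for its predicted next state, then sample one
    # of those candidates according to the belief distribution over models.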
    for i in range(0, num_models):
        next_state[i], _rewards[i] = sim.simulator(state, p, expert_models[i],
                                                   state_rewards)
    return int(np.random.choice(next_state, 1, p=belief)[0])
Example #10
File: gui3.py Project: nielsdb/zeppelin
 def toggleSimulator(self):
     if self.sim == None:
         enemy = "goudsimu"
         self.zeppelins.append(AbstractZeppelin("red", enemy))
         mq.initEnemy(enemy)
         self.sim = simulator.simulator(100,100, enemy)
         return
     self.sim.toggleActive()
Example #11
 def __init__(self, global_assignment, name, trainer, cfg):
     self.name = str(name) + "_worker_" + str(global_assignment)
     self.global_assignment = self.name[-1]  #get global assignment index
     self.trainer = trainer  #get initialized Adam optimizer
     self.cfg = cfg
     self.local_AC = AC_Network(self.cfg.s_size, self.cfg.a_size, self.name,
                                trainer)  #set up worker
     self.env = simulator(a_size)
     self.experience = experience(self.cfg)
Example #12
def update_models(state, expert_models, p, num_models, state_rewards,
                  real_world_expert):
    _rewards = np.zeros(num_models)
    next_state = np.zeros(num_models)
    for i in range(0, num_models):
        next_state[i], _rewards[i] = sim.simulator(state, p, expert_models[i],
                                                   state_rewards)

    return int(next_state[real_world_expert]), _rewards
Example #13
def getConfigurationMatrix():
    powerSimulator = simulator()
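    # Sweep a grid of (batteryCapacity, panelArea) values, scaling each index
    # by 1/100 before passing it to runSimulation, and collect the results as
    # a heat matrix.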
    heatMatrix = [[
        powerSimulator.runSimulation(
            [batteryCapacity / 100.0, panelArea / 100.0])
        for batteryCapacity in range(0, 1001, 50)
    ] for panelArea in range(0, 2001, 100)]

    with open('results.txt', 'w') as outFile:
        outFile.write(str(heatMatrix))
Example #14
 def __init__(self, name, trainer, cfg):
     self.name = "worker_" + str(name)
     self.number = name
     self.trainer = trainer
     self.cfg = cfg
     self.local_AC = AC_Network(self.cfg.s_size, self.cfg.a_size, self.name,
                                trainer)
     self.update_local_ops = update_target_graph('global', self.name)
     self.env = simulator(a_size)
     self.experience = experience(self.cfg)
Example #15
def test_redraw(config):
    transformer = image_normalize('background')
    db = coco(config, 'train', transform=transformer)
    output_dir = osp.join(config.model_dir, 'test_redraw')
    maybe_create(output_dir)

    env = simulator(db, config.batch_size)
    env.reset()

    loader = DataLoader(db,
                        batch_size=config.batch_size,
                        shuffle=True,
                        num_workers=config.num_workers)

    for cnt, batched in enumerate(loader):
        out_inds = batched['out_inds'].long().numpy()
        out_vecs = batched['out_vecs'].float().numpy()

        # sequences = []
        for i in range(out_inds.shape[1]):
            frames = env.batch_render_to_pytorch(out_inds[:, i], out_vecs[:,
                                                                          i])
            # sequences.append(frames)
        # sequences = torch.stack(sequences, dim=1)
        sequences = env.batch_redraw(True)
        # sequences = [tensors_to_imgs(x) for x in sequences]

        for i in range(len(sequences)):
            sequence = sequences[i]
            image_idx = batched['image_index'][i]
            name = '%03d_' % i + str(image_idx).zfill(12)
            out_path = osp.join(output_dir, name + '.png')
            color = cv2.imread(batched['color_path'][i], cv2.IMREAD_COLOR)
            color, _, _ = create_squared_image(color)

            fig = plt.figure(figsize=(32, 16))
            plt.suptitle(batched['sentence'][i], fontsize=30)

            for j in range(min(len(sequence), 14)):
                plt.subplot(3, 5, j + 1)
                partially_completed_img = clamp_array(sequence[j], 0,
                                                      255).astype(np.uint8)
                partially_completed_img = partially_completed_img[:, :, ::-1]
                plt.imshow(partially_completed_img)
                plt.axis('off')

            plt.subplot(3, 5, 15)
            plt.imshow(color[:, :, ::-1])
            plt.axis('off')

            fig.savefig(out_path, bbox_inches='tight')
            plt.close(fig)

        break
Example #16
File: houston.py Project: jbellevi/houston
 def second_thread(self):
     while not self.stop.is_set():
         self.reset_serial_flag = False  # if true, will close out the serial 
         self.uart_tab.set_port_info(self.serialPort,'disconnected')   # update the name of the serial port
         if self.serialPort != 'simulator':
             self.connect_serial()
         else:
             self.sim = simulator()
             self.execute_simulator()
     else:
         return
Example #17
def getOptimalConfiguration():
    powerSimulator = simulator()
    results = la.getBestParameters(powerSimulator.runSimulation, 2, 0, 300,
                                   0.01)

    with open('results.txt', 'w') as file:
        file.write(str(results))

    print(results[1][0])

    pd.Series(results[0]).plot()
Example #18
    def __init__(self, arg):
        self.arg = arg
        self.simMode = arg.get('simMode', 1)
        self.config = arg.get('config', None)
        self.breakpoint = arg.get('breakpoint', None)
        self.plot = arg.get('plot', None)
        self.log = logging.getLogger('trader')

        self.tick_btce = api_btce.publicapi()
        self.tick_stamp = api_stamp.publicapi()

        if self.simMode != 0:
            self.tapi_btce = api_btce.tradeapi(self.config.apikey_btce, self.config.apisecret_btce)
            self.tapi_stamp = api_stamp.tradeapi(self.config.apikey_stamp, self.config.apisecret_stamp, self.config.apiid_stamp)

            self.hr_btce = historyRecorder(self.arg, "btce")
            self.hr_stamp = historyRecorder(self.arg, "stamp")
        else:
            self.hr_btce = simulator.simHistoryRecorder(self.arg, "btce")
            self.hr_stamp = simulator.simHistoryRecorder(self.arg, "stamp")
            self.sim_btce = simulator.simulator("btce", {"usd": 0, "btc": 1}, self.arg, self.hr_btce, 0.002)
            self.sim_stamp = simulator.simulator("stamp", {"usd": 500, "btc": 0}, self.arg, self.hr_stamp, 0.005)
Example #19
def statrunsTimeDone():

    num_runs = 20

    output_file = "../results/tLimited_auction.txt"

    file = open(output_file, 'w')

    num_rows = 10
    row_len = 8
    num_bins = 100
    num_workers = 15
    num_bots = 8

    # 0 = greedy, 1 = auction based, 2= replanning
    coord_method = 2

    writeHeader(file, num_rows, row_len, num_bins, num_workers, num_bots,
                coord_method, '0')

    file.write("percentage picked, ")
    file.write("time wasted\n")

    time_done = []

    for i in range(num_runs):

        sim = simulator(num_rows, row_len, num_bots, num_bins, num_workers)
        coord = coordinator(coord_method)
        plnr = planner(sim)
        time_taken = 0

        while (sim.applesLeft() != 0 and time_taken < 10000):
            plan = coord.cordStep(sim)

            plan = plnr.getPlan(plan, sim)

            sim.step()

            #sim.drawSimulator()

            time_taken += 1

        time_done.append(time_taken)

        print("done running iteration: ", i)
    print("mean percent picked: ", np.mean(time_done))
    print("std percent picked: ", np.std(time_done))
Example #20
 def learningTrial(self):
     for _ in range(self.N):
         cs = self.initialState()
         sim = simulator(self.mdp, currentS=cs, terminalS=self.ts)
         while True:
             #               print(cs, sim.cs)
             a = self.exploreOrExploitPolicy(cs)
             res = sim.takeAction(a)
             if res == "end":
                 break
             r, ns = res
             #                print(cs, ns)
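             # Q-learning style update: bootstrap from the value of the greedy
             # action ga in the next state ns.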
             ga = self.greedyPolicy(ns)
             self.q[cs][a] = self.q[cs][a] + self.alpha * (
                 r + (self.beta * self.q[ns][ga]) - self.q[cs][a])
             cs = ns
Example #21
File: main.py Project: SuerpX/CS533
def eachSimulation(mdp, p, beta, maxSteps, pOfParkingRandom, terminalState):
    _, A, _, _ = mdp
    steps = 0
    cs = 0
    reward = 0
    sim = simulator(mdp, currentS=cs, terminalS=terminalState)
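    # Roll out policy p from state 0 for at most maxSteps steps, accumulating
    # the beta-discounted reward; illegal actions do not advance the step count.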
    while steps < maxSteps:
        res = sim.takeAction(p(mdp, cs, p=pOfParkingRandom))
        if res == "end":
            break
        elif res == "illegal":
            steps -= 1
        else:
            r, cs = res
            reward += (beta**steps) * r
        steps += 1
    return reward
Example #22
    def __init__(self, port):
        self.port = port
        self.simulator = simulator.simulator()
        self.gui = simulator_gui.simulator_gui(self.simulator)

        self.led_yellow = output_dummy(self.gui.set_yellow_led)
        self.led_green = output_dummy(self.gui.set_green_led)

        self.heater = output_dummy(self.simulator.set_heater)
        self.sirup_pump = output_dummy(self.simulator.set_sirup)
        self.water_pump = output_dummy(self.simulator.set_water)
        self.sirup_valve = output_dummy(self.simulator.set_sirup_valve)
        self.water_valve = output_dummy(self.simulator.set_water_valve)

        self.keypad = sensor_dummy(self.gui.get_keypad)
        self.reflex = sensor_dummy(self.simulator.get_cup)
        self.temperature = sensor_dummy(self.simulator.read_temp)
        self.distance = sensor_dummy(self.simulator.read_mm)
Example #23
def upload_files():
    sm = simulator()
    sm.admin_login(name, pw)
    file_index = get_file_index()
    pass_filename = ['README.md', '_config.yml']
    for file_name in file_index:
        file_name = file_name.strip()
        if file_name not in pass_filename:
            file_content = open(get_file_path() + file_name.strip(),
                                'r').read()
            data = {
                'title': file_name,
                'content': file_content,
                'event_time':
                common_tools.time_to_str(int(time.time())).split()[0],
                'post_items': []
            }
            res = sm.upload(data)
            print(res)
Example #24
 def eachSimulation(self, maxSteps):
     _, A, _, _ = self.mdp
     steps = 0
     cs = self.initialState()
     reward = 0
     sim = simulator(self.mdp, currentS=cs, terminalS=self.ts)
     while steps < maxSteps:
         #           print(self.greedyPolicy(cs))
         res = sim.takeAction(self.greedyPolicy(cs))
         if res == "end":
             break
         elif res == "illegal":
             steps -= 1
         else:
             r, cs = res
             reward += (self.beta**steps) * r
         steps += 1
     # print(steps)
     return reward
Example #25
def main():
    num_rows = 10
    row_len = 8
    num_bins = 100
    num_workers = 8
    num_bots = 5

    # 0 = greedy, 1 = auction based, 2= replanning
    coord_method = 0

    num_timestep = 100

    sim = simulator(num_rows, row_len, num_bots, num_bins, num_workers)
    coord = coordinator(coord_method)
    plnr = planner(sim)
    sim.drawSimulator()

    for timestep in range(num_timestep):
        plan = coord.cordStep(sim)
        # print sim.getIdleBots()
        # for item in plan:
        #   print "Robot: ", item.robot_id
        #   print "Goals: ", item.locations
        # print "here"

        plan = plnr.getPlan(plan, sim)
        # print "Planner Plan", plan
        sim.drawSimulator()
        # print "apples picked: ",sim.apples_picked

        # for key in sim.bots.keys():
        #   print key, sim.bots[key].plan
        #raw_input()
        print(timestep)

        sim.step()
    print("done running")
    print("total num apples: ", sim.total_apples)
    print("num apples picked: ", sim.apples_picked)
    print("percentage picked: ", sim.apples_picked / sim.total_apples * 100)
    print("wasted time: ", sim.wasted_time)
    plt.show()
    return 0
Example #26
    def action(self):

        #       self.qTable = {}

        lactions = self.legalAction(self.gb.board)
        #       print(lactions)
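        # Monte Carlo tree search style loop: select actions with the UCB
        # policy while every legal action is already in qTable, expand one
        # unvisited (board, action) pair, roll out a random policy, and back
        # up the reward along the recorded pairs.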
        for i in range(self.nos):
            sim = simulator(board=numpy.copy(self.gb.board),
                            stepsLimited=self.sl,
                            beta=self.beta,
                            discounted=self.d)
            sboard = sim.sgb.board.tostring()
            unactions = []
            backQ = []
            for a in lactions:
                if (sboard, a) not in self.qTable:
                    unactions.append(a)
            la = lactions
            #           print(len(unactions))
            while len(unactions) == 0 and not sim.sgb.islost:
                ucbA = self.UCBPolicy(sboard, la)
                backQ.append((sboard, ucbA))
                sim.takeAction(ucbA)
                sboard = sim.sgb.board.tostring()
                la = self.legalAction(sim.sgb.board)
                unactions = []
                for a in la:
                    if (sboard, a) not in self.qTable:
                        unactions.append(a)
            if len(unactions) != 0:
                simA = unactions[randint(0, len(unactions) - 1)]
                backQ.append((sim.sgb.board.tostring(), simA))
                sim.takeAction(simA)
            r = sim.simulation(sim.randomPolicy)
            #         print(r)
            #           print(backQ)
            for backS, backA in backQ[::-1]:
                if (backS, backA) not in self.qTable:
                    self.qTable[backS, backA] = [0, 0]
                self.qTable[backS, backA][0] += r
                self.qTable[backS, backA][1] += 1
        return self.greedyPolicy(self.gb.board.tostring(), lactions)
Example #27
def conversation(bot, update):
    input_text = update.message.text
    chat_id = update.message.chat_id
    print(chat_id, input_text)
    if input_text == 'exit':
        send_message(bot, chat_id, 'bye')
        return

    dbm = DBManager()
    code = dbm.get_code(input_text)
    if code is None:
        send_message(bot, chat_id, 'not found.')
        return
    poten_datas = dbm.get_target_forecast(code)
    forecast_msg = get_forecast_explain(poten_datas)
    simul_msg = simulator.simulator(code=code)
    print(forecast_msg)
    print(simul_msg)
    send_message(bot, chat_id, forecast_msg)
    send_message(bot, chat_id, simul_msg)
Example #28
def __main__():

    geometry = sim.default_geo()

    data_file = root.TFile.Open("~/hex/berk/nersc/data/jackBig.root")

    tree = data_file.Get("tree")

    dummy = rd.supervisor()
    d = rd.data(tree, dummy)
    d.stack()
    for k, event_number in enumerate(events):
        print("SIMULATING EVENT: " + str(event_number))
        simul = sim.simulator(geometry)
        d = rd.data(tree, simul.supervisor)
        p, e, vtx = d.get_event_w_shift(event_number, module_box._xwalls,
                                        module_box._ywalls, module_box._zwalls)

        geom = simul.project_event(p, e, vtx)
        simul.get_stats()
        simul.supervisor.write()
Example #29
def effect_k_scenario():
    logger = logging.getLogger('upgrade_product_logger')
    logger.debug('Effect of K simulation')
    kvalues = [6, 7, 8, 9, 10]
    dims = np.full(1, defaultDim, dtype='int32')
    datasizes = np.full(1, defaultDataSize, dtype='int32')
    dists = np.array(['corr', 'uni', 'anti'])
    algNames = np.array(['Upgrade_Algorithm', 'New_Upgrade_Algorithm'])
    simSettings = generate_simulation_settings(algNames, datasizes, dims, dists, kvalues)
    for setting in simSettings:
        ProductFileName = getProductFileName(int(setting[dimIndex]))
        ProductBuffer = np.loadtxt(ProductFileName, delimiter=',', dtype='int32')
        ProductIndex = 0
        for iter in range(numIteration):
            DataFileName = getDataFileName(setting[distIndex], int(setting[datasizeIndex]), int(setting[dimIndex]), iter)
            logger.debug('[info]Dist = %s, dataSize = %d, dim = %d, k = %d, iter = %d data_file = %s' %
                         (setting[distIndex], int(setting[datasizeIndex]), int(setting[dimIndex]),
                          int(setting[kValueIndex]), iter, DataFileName))
            aSim = sim.simulator(kValue=int(setting[kValueIndex]), fileName=DataFileName, algName=setting[algIndex],
                          product=ProductBuffer[ProductIndex])
            aSim.run()
            ProductIndex += 1
Example #30
def policy_switching(start_state, experts, num_models, w, h, policy,
                     state_rewards, _belief):
    num_policies = policy.shape[0]
    reward_pol_ex = np.zeros((num_policies, num_models))
    for pol in range(0, num_policies):  # iterate through each policy
        for m in range(0, num_models):  # iterate through each expert
            ave_reward = 0  # holds average reward of expert after w policy evaluations
            for _h in range(0, w):  # run each policy on expert w times
                val, val_temp, state = 0, 0, start_state
                for j in range(0, h):  # perform h steps of policy rollout
                    next_s, val_temp = sim.simulator(state, policy[pol],
                                                     experts[m], state_rewards)
                    val += val_temp
                    state = next_s
                ave_reward += val
            reward_pol_ex[pol][m] = ave_reward / w  # ave reward after w calls

    # determine best policy based on p(m1)*R11 + p(m2)*R12 > or < p(m1)*R21 + p(m2)*R22
    scaled_val = np.zeros(num_policies)
    for i in range(0, num_policies):
        scaled_val[i] = np.sum((reward_pol_ex[i] * _belief))
    return np.argmax(scaled_val)
Example #31
    def action(self):
        lactions = self.legalAction()
        self.rewardOfActions = {}
        for _ in range(self.nos):
            sim = simulator(board=numpy.copy(self.gb.board), stepsLimited=self.sl)
            unactions = []
            if len(self.rewardOfActions) != len(lactions) * 2:
                for a in lactions:
                    if (a, 't') not in self.rewardOfActions:
                        unactions.append(a)
                a = unactions[randint(0, len(unactions) - 1)]
            else:
                 a = self.exploreOrExploitPolicy(lactions)
            sim.takeAction(a)
            r = sim.simulation(sim.randomPolicy)

            if (a, 't') not in self.rewardOfActions:
                self.rewardOfActions[a, 't'] = 0
                self.rewardOfActions[a, 'c'] = 0
            self.rewardOfActions[a, 't'] += r
            self.rewardOfActions[a, 'c'] += 1
        return self.greedyPolicy(lactions)
Example #32
File: wraper.py Project: teamZeta/arp2
import matplotlib.image as mpimg
import numpy as np
from matplotlib.patches import Rectangle
import vot
import sys
import time
import cv2
import numpy
import collections
from camShift import camShift
import simulator


sim = True
if sim:
    #handle = simulator.simulator("/home/boka/arp/david/")
    handle = simulator.simulator("/home/boka/arp/vot-toolkit/workspace/sequences/cup/")
    #handle = simulator.simulator("/home/boka/arp/vot-toolkit/workspace/sequences/woman/")
    #handle = simulator.simulator("/home/boka/arp/vot-toolkit/workspace/sequences/juice/")
    #handle = simulator.simulator("/home/boka/arp/vot-toolkit/workspace/sequences/jump/")
else:
    handle = vot.VOT("rectangle")
selection = handle.region()

imagefile = handle.frame()
print("prvo")
if not imagefile:
    sys.exit(0)

# image = cv2.imread(imagefile, cv2.IMREAD_GRAYSCALE)
image = cv2.imread(imagefile, cv2.IMREAD_COLOR)
print(imagefile)
Example #33
                          args.timeshape, args.njobs, args.est_factor, seed)
final_results = shelve.open(os.path.join(args.dirname, fname))

for name, scheduler, errfunc, args.iterations in instances:

    print(name, end='')

    if args.iterations is None:
        # if no. of iterations is None, it means that a single pass is
        # enough (no randomness there)
        if name in final_results:
            continue
        else:
            args.iterations = 1

    scheduler_results = final_results.get(name, [])

    for i in range(args.iterations - len(scheduler_results)):
        results = list(simulator.simulator(jobs, scheduler, errfunc))
        sojourns = numpy.zeros(n_jobs)
        for compl, jobid in results:
            sojourns[job_idxs[jobid]] = compl - job_start[jobid]
        scheduler_results.append(sojourns)
        print('', sojourns.mean(), end='')
        sys.stdout.flush()
    print()

    final_results[name] = scheduler_results

final_results.close()
Example #34
    #def getCurrentMap from the simulator, which builds a map of 1s and 0s
    #from the state of the map
    # def getCurrentMap(self,sim):
    #Returns a map of 1s and 0s based on obstacles in the env

    #Build 1s and 0s for the terrain type
    # terrain=np.zeros((sim.orchard_map.shape[0],sim.orchard_map.shape[1]))


if __name__ == '__main__':
    l = [[1, [1, 1, 'G']], [2, [10, 11, 'G'], [7, 5, 'P']], [3, [4, 6]]]
    # p=planner()
    # p.getPlan(l)
    # print p.robotIDs
    # print p.robotTargets
    s = simulator(10, 20, 5, 30, 5)
    num_rows = 5
    row_size = 8
    p = planner(s)
    p.getPlan(l, s)
    # print s.orchard_map[0][1].terrain
    # astar2d()

    # OLD MAP INITIALIZATION CODE
# m=np.zeros((row_size+2,num_rows*2-1))
#make every other row full of trees
# m[:,0::2]=np.ones((m[:,0::2].shape[0],m[:,0::2].shape[1]))
# orch_module=np.array([1,1,0])
# orch=np.array([])
# for k in range(num_rows):
#   orch=np.concatenate((orch,orch_module))
Example #35
    print(name, end='')

    sojourns_per_priority = {pri: [] for pri in range(1, 6)}
    
    if args.iterations is None:
        # if no. of iterations is None, it means that a single pass is
        # enough (no randomness there)
        if name in final_results:
            continue
        else:
            args.iterations = 1

    scheduler_results = final_results.get(name, [])

    for i in range(args.iterations - len(scheduler_results)):
        results = list(simulator.simulator(jobs, scheduler, errfunc,
                                           weights))
        sojourns = numpy.zeros(args.njobs)
        for compl, jobid in results:
            sojourn = compl - job_start[jobid]
            sojourns[jobid] = sojourn
            sojourns_per_priority[priorities[jobid]].append(sojourn)
        scheduler_results.append(sojourns)
        print('', sojourns.mean(), end='')
        sys.stdout.flush()
    final_results[name] = scheduler_results
    print({pri: numpy.array(s).mean()
           for pri, s in sojourns_per_priority.items()})
    print()

final_results.close()
Example #36
    ('LIFO', schedulers.LIFO),
    ('LIFO_SR', schedulers.LIFO_SR),
    ('PS', schedulers.PS),
    ('SRPT', schedulers.SRPT),
    ('SRPTPS', schedulers.SRPT_plus_PS),
    ('FSP', schedulers.FSP),
    ('FSP+PS', schedulers.FSP_plus_PS),
    ('LAS', schedulers.LAS),
    ('FSP+LAS', schedulers.FSP_plus_LAS),
    ('SRPTLAS', schedulers.SRPT_plus_LAS),
    ('WFQEGPS', schedulers.WFQE_GPS),
]

results = {}
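# Run every scheduler on the same job trace with fixed size estimations and
# record the per-job completion times it reports.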
for name, scheduler in instances:
    sim = simulator.simulator(jobs, scheduler,
                              simulator.fixed_estimations(estimations))
    results[name] = {jobid: t for t, jobid in sim}


head_fmt = '\t'.join(['{}'] * (len(instances) + 4))
fmt = '\t'.join(['{}'] + ['{:.2f}'] * (len(instances) + 3))

scheduler_names = [n for n, _ in instances]
header = head_fmt.format('Job', 'Arr.', 'Size', 'Est.', *scheduler_names)
print(header)
print('=' * len(header.expandtabs()))
for (jobid, arrival, d), e in zip(jobs, estimations):
    print(fmt.format(jobid, arrival, d, e,
                     *(results[n][jobid] for n in scheduler_names)))
Example #37
 def run(self, verbose=False, **options):
     simu = simulator(self)
     simu.run(verbose=verbose, **options)
     self.raw = simu.raw
Example #38
        self.assertTrue(kdom_util.kDomByPointsPy(self.buf[4], self.buf, 5))
        self.assertTrue(kdom_util.kDomByPointsPy(self.buf[4], self.buf, 6))
        self.assertTrue(kdom_util.kDomByPointsPy(self.buf[4], self.buf, 4))
        self.assertTrue(kdom_util.kDomByPointsPy(self.buf[0], self.buf, 3))
        self.assertFalse(kdom_util.kDomByPointsPy(self.buf[0], self.buf, 6))
        self.assertFalse(kdom_util.kDomByPointsPy(self.buf[0], self.buf, 5))
        self.assertFalse(kdom_util.kDomByPointsPy(self.buf[3], self.buf, 6))
        self.assertTrue(kdom_util.kDomByPointsPy(self.buf[3], self.buf, 5))

    def test_retrieve_KSkylines(self):
        kdomSky = kdom_util.retrieveKDomSkylinePy(self.buf, 4)
        self.assertEqual(2, len(kdomSky))
        self.assertTrue(np.array_equal(np.array([[1,1,1,3,3,3], [3,3,3,1,1,1]]), kdomSky))
        kdomSky = kdom_util.retrieveKDomSkylinePy(self.buf, 5)
        self.assertEqual(3, len(kdomSky))
        self.assertTrue(np.array_equal(np.array([[1,1,1,3,3,3], [3,3,3,1,1,1],[2,2,2,4,2,2]]), kdomSky))



if __name__ == '__main__':
    #unittest.main()
    pathName = os.path.dirname(os.path.abspath(__file__))
    ProductBuffer = np.loadtxt(pathName + "\data\product_uni_1_5_0.db", delimiter=",")
    aSim = sim.simulator(kValue=4, fileName=os.path.dirname(os.path.abspath(__file__)) + '\data\data_corr_100_5_0.db',
                         algName='Upgrade_Algorithm',
                         product=ProductBuffer[0])
    start_time = time.time()
    aSim.run()
    end_time = time.time()
    print(end_time-start_time)
Example #39
File: wraper.py Project: teamZeta/arp
#handle = vot.VOT("rectangle")
import vot
import sys
import time
import cv2
import numpy
import collections
import flow
import simulator
import ORF
from ncc import NCCTracker
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from matplotlib.patches import Rectangle
handle = simulator.simulator("/home/boka/arp/david/")
selection = handle.region()

imagefile = handle.frame()
print("prvo")
if not imagefile:
    sys.exit(0)

#image = cv2.imread(imagefile, cv2.IMREAD_GRAYSCALE)
image = cv2.imread(imagefile, cv2.IMREAD_COLOR)
print(imagefile)
tracker = NCCTracker(image, selection)
tracker_flow = flow.flow(image, selection)
tracker_OT = ORF.flow(image, selection)
print("do tukej")
plt.ion()
Example #40
 def run_jobs(self, jobs, error=simulator.identity):
     return list(simulator.simulator(jobs, self.scheduler, error))
Example #41
import simulator

#sim=simulator(z=3,V=3,m=3)
#sim.make_W(a_active_W=10,b_active_W=1e5)
#sim.make_H(a_active_H=10,b_active_H=1,n=100)
#sim.make_X()
#dir_save="/global/data/tcga/metadata/data/"
#prefix="simulated_data"
#sim.save(dir_save,prefix)

a_active_W=10
b_active_W=1e5
a_active_H=10
b_active_H=1
sim=simulator.simulator(z=3,V=3,m=10)
sim.make_W(a_active_W=10,b_active_W=1e5)
sim.make_H(a_active_H=10,b_active_H=1,n=1000)
sim.make_X()

# save
dir_save="/home/yifeng/research/mf/mvmf_v1_1/data/"
prefix="simulated_data" + "_a_active_W="+str(a_active_W) + "_b_active_W="+str(b_active_W) + "_a_active_H="+str(a_active_H) + "_b_active_H="+str(b_active_H) 
sim.save(dir_save,prefix)






Example #42
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr  8 10:08:26 2019

@author: tamird
"""

import simulator

# Test the simulator.py
coord = "/home/data/mashas/tamir/adpy/testing/lig.pdb"
param = "/home/data/mashas/tamir/adpy/testing/pf.mdp"

sim = simulator.simulator(coord, param)

sim.prepare_topology("prepare")

sim.run("run01")
Example #43
final_results = shelve.open(result_fname)

for name, scheduler, errfunc, args.iterations in instances:

    print("scheduler:", name)

    if args.iterations is None:
        # if no. of iterations is None, it means that a single pass is
        # enough (no randomness there)
        if name in final_results:
            continue
        else:
            args.iterations = 1

    scheduler_results = final_results.get(name, [])

    for i in range(args.iterations - len(scheduler_results)):
        results = list(simulator.simulator(jobs, scheduler, errfunc))
        sojourns = zeros(n_jobs)
        for compl, jobid in results:
            sojourns[job_idxs[jobid]] = compl - job_start[jobid]
        scheduler_results.append(sojourns)
        print(sojourns.mean(), end=' ')
        sys.stdout.flush()
    print()

    final_results[name] = scheduler_results
    print()

final_results.close()