Example #1
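        # Print each matching system and, if station listing is enabled, its stations sorted by distance.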
        for sysobj in sorted(sys_matches, key=lambda t: t.name):
          print("  {0}{1}".format(sysobj.to_string(), " ({0})".format(sysobj.id) if self.args.show_ids else ""))
          if self.args.list_stations:
            # TODO: Maybe roll this into the original query somehow
            stlist = envdata.find_stations(sysobj)
            stlist.sort(key=lambda t: (t.distance if t.distance else sys.maxsize))
            for stn in stlist:
              print("        {0}".format(stn.to_string(False)))
        print("")

    if (self.args.stations or not self.args.systems) and len(stn_matches) > 0:
      print("")
      print("Matching stations:")
      print("")
      for stnobj in sorted(stn_matches, key=lambda t: t.name):
        print("  {0}{1}".format(stnobj.to_string(), " ({0})".format(stnobj.id) if self.args.show_ids else ""))
      print("")

    if len(sys_matches) == 0 and len(stn_matches) == 0:
      print("")
      print("No matches")
      print("")

    return True


if __name__ == '__main__':
  env.start()
  a = Application(env.local_args, False)
  a.run()
Example #2
il_update_freq = config.il_update_freq
my_buffer = deque()  # double-ended queue used as a bounded replay buffer
replay_memory = config.replay_memory
# Make a path for our model to be saved in.
path = config.path
if not os.path.exists(path):
    os.makedirs(path)
i = 0
init = tf.global_variables_initializer()
saver = tf.train.Saver()

if __name__ == '__main__':
    with tf.Session() as sess:
        sess.run(init)
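        # Imitation-learning warm-up: collect demonstration transitions until config.il_steps is reached.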
        if total_steps < config.il_steps:
            s = env.start(total_steps)
            d = False
            # Demonstration rollout: the environment supplies the action at each step
            while not d:
                a = env.auto_move()
                s1, r, d = env.step(config.valid_actions[a])
                total_steps += 1
                my_buffer.append([s, a, r, s1, d])  # Save the transition to the replay buffer.
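                # Keep at most replay_memory transitions, discarding the oldest first.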
                if len(my_buffer) > replay_memory:
                    my_buffer.popleft()

                if total_steps % il_update_freq == 0:
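                    # Periodically fit the network on the collected demonstrations.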
                    DQN.update_network(sess, my_buffer)
Example #3
                 sess)  # Update the target network toward the primary network.


if __name__ == '__main__':
    with tf.Session() as sess:
        sess.run(init)
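        # Optionally resume from the most recent checkpoint before training.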
        if config.restore:
            print('Loading Model...')
            ckpt = tf.train.get_checkpoint_state(path)
            if ckpt and ckpt.model_checkpoint_path:
                print(ckpt.model_checkpoint_path)
                saver.restore(sess, ckpt.model_checkpoint_path)

        for i in range(num_episodes):
            # Reset environment and get first new observation
            s = env.start(i)
            d = False
            rAll = 0
            # The Q-Network
            while not d:
                # Choose an action greedily from the Q-network, with probability e of taking a random action
                if np.random.rand(1) < e or total_steps < pre_train_steps:
                    a = np.random.randint(0, len(config.valid_actions))
                else:
                    a = sess.run(mainQN.predict,
                                 feed_dict={mainQN.scalarInput: [s]})[0]
                s1, r, d = env.step(config.valid_actions[a])
                total_steps += 1
                my_buffer.append([s, a, r, s1, d])  # Save the transition to the replay buffer.
Example #4
def execution(filename_output=DIR_OUTPUT + OUTPUT_FILE,
              filename_count=DIR_OUTPUT + OUTPUT_COUNT):
    parser = env.create_parser()
    opts = parser.parse_args()
    #configfn = 'config/test_prime.json'
    conf = None
    with open(opts.configfn, 'r') as fd:
        conf = json.load(fd)

    # definitions internal to the environment
    env.start(conf)

    # definitions external to the environment
    env.server_tracker = serverTracker.serverTracker()
    env.repair_manager = regenerator.MBRrepair()

    logfile = conf.get('logfile', 'log.txt')
    loglevel = getattr(logging, conf.get('loglevel', 'INFO'))
    if loglevel == logging.DEBUG:
        print(loglevel)

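    # Log to file; SimulationTimeFilter supplies the %(TIME)f field used in the format string.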
    logging.basicConfig(
        filename=logfile,
        format='%(TIME)f %(filename)s:%(lineno)d/%(funcName)s %(message)s',
        level=loglevel,
        filemode='w')
    logger = logging.getLogger()
    logger.addFilter(SimulationTimeFilter())
    logging.info('%s', json.dumps(conf))

    print('Test of distribution:')
    values = list()
    for i in range(1000):
        one = env.func_server_up_time()
        values.append(one)
        #print(one)
    print('Distribution: \'up time\' dist:{} params:{}'.format(
        env.config['server']['up_rate_distr'],
        env.config['server']['up_rate_param']))
    values = np.array(values)
    print('Result samples:{:d} mean:{:f} std:{:f}'.format(
        len(values), np.mean(values), np.std(values)))

    print('Chunk size:{:d}'.format(int(env.config['chunk']['size'])))

    #servers = list()
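    # Queue a bootup event for each host, delayed by its sampled initial down time.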
    for hid in range(1, env.maxid + 1):
        down_time = env.func_server_down_time()
        #if down_time > env.endtime:
        #    print(hid)
        ev = (env.now + down_time, server.Server.startup, [
            hid, env.STATUS_NO_CHUNK, env.STATUS_STARTED,
            'Queue create server hid:{:d}, at:{:f}'.format(
                hid, env.now + down_time)
        ])
        env.enqueue(ev)
    logging.info('created %d bootup events', len(env.evqueue))

    env.repair_manager.activate([])

    #assert (len(sim.evqueue) == (sim.config['maxhid'] // sim.host_tracker.vulnerable_period) + 1)

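    # Main event loop: process queued events in order until the queue empties or endtime is reached.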
    while env.evqueue and env.now < env.endtime:
        _now, fn, data = env.dequeue()
        logging.debug('dequeue len %d action(%s)', len(env.evqueue), data[-1])
        fn(data)

    files_results()

    return 0