Ejemplo n.º 1
0
def setup_train_dir(logdir, overwrite=False, force_train=True):
    """Sets up a training directory under `logdir`.

    Serializes the in-memory CONFIG to `<logdir>/config.json` when that file
    is missing or `overwrite` is set; otherwise loads the existing
    config.json and merges it into CONFIG. Finally creates the `train.logs`
    subdirectory.

    Args:
        logdir: Path of the training directory (created if missing).
        overwrite: If True, rewrite config.json even when it already exists.
        force_train: If True, allow reusing a logdir that already contains
            `train.logs` instead of raising.

    Raises:
        ValueError: If `train.logs` already exists and `force_train` is False.
    """
    tf.io.gfile.makedirs(logdir)
    config_path = os.path.join(logdir, 'config.json')
    # NOTE(review): os.path.exists only sees the local filesystem while the
    # writes go through tf.io.gfile — confirm logdir is always local.
    if not os.path.exists(config_path) or overwrite:
        # NOTE(review): this message is misleading when the file exists and
        # `overwrite` forced this branch.
        logging.info(
            'Using the existing passed in config as no config.json file exists in '
            '%s', logdir)
        with tf.io.gfile.GFile(config_path, 'w') as config_file:
            # C404 fix: build the dict directly with a comprehension instead
            # of dict() over a list of pairs.
            config = {k: to_dict(v) for k, v in CONFIG.items()}
            json.dump(config, config_file, sort_keys=True, indent=4)
    else:
        logging.info('Using config from config.json that exists in %s.',
                     logdir)
        with tf.io.gfile.GFile(config_path, 'r') as config_file:
            config_dict = json.load(config_file)
        CONFIG.update(config_dict)

    train_logs_dir = os.path.join(logdir, 'train.logs')
    if os.path.exists(train_logs_dir) and not force_train:
        raise ValueError(
            'You might be overwriting a directory that already '
            'has train_logs. Please provide a new logdir name in '
            'config or pass --force_train while launching script.')
    tf.io.gfile.makedirs(train_logs_dir)
Ejemplo n.º 2
0
def main():
    """Deploys every configured service: copies sources, builds the Docker
    image, and (unless the service sets 'nonginx') installs and enables an
    nginx site configuration.

    Relies on module-level SERVICES_PATH, NGINX_CONF_PATH, CONFIG,
    find_ports and render_nginx_conf.
    """
    print("copying files")
    # Start from a clean copy of the services tree.
    if os.path.isdir(SERVICES_PATH):
        shutil.rmtree(SERVICES_PATH)
    shutil.copytree('../services', SERVICES_PATH)
    os.chdir(SERVICES_PATH)
    # Fix: use the per-service settings yielded by items() directly instead
    # of discarding them and re-looking-up CONFIG[service_name].
    for service_name, config in CONFIG.items():
        print("deploying", service_name)
        docker_port, external_port = find_ports(service_name)
        os.chdir(os.path.join(SERVICES_PATH, service_name))
        # NOTE(review): os.system with sudo and an unquoted command; prefer
        # subprocess.run([...], check=True) if this ever takes variable args.
        os.system('sudo docker-compose build')
        if not config.get('nonginx', False):
            # Single lookup; also avoids os.path.join(cwd, None) blowing up
            # if the key is present but set to None.
            static_dir = config.get('static_dir_path')
            with open(
                    os.path.join(NGINX_CONF_PATH, 'sites-available',
                                 service_name), 'w') as nginx_conf:
                nginx_conf.write(
                    render_nginx_conf(
                        service_name,
                        external_port,
                        docker_port,
                        static_dir,
                        os.path.join(os.getcwd(), static_dir)
                        if static_dir is not None else None,
                    ))
            os.chdir('..')
            os.system('ln -s {} {}'.format(
                os.path.join(NGINX_CONF_PATH, 'sites-available', service_name),
                os.path.join(NGINX_CONF_PATH, 'sites-enabled', service_name),
            ))
Ejemplo n.º 3
0
def main(filename=None):
    """Runs every configured availability check and dumps results to JSON.

    Args:
        filename: Output path. Defaults to a file named after today's date;
            computed at call time (the previous default baked the date in at
            import time).
    """
    if filename is None:
        filename = f"availabilities_{date.today().strftime('%Y-%m-%d')}.json"

    availabilities = {}

    for name, info in CONFIG.items():
        # Security fix: resolve the configured function name and call it with
        # the configured kwargs directly, instead of eval()-ing a constructed
        # source string (arbitrary code execution from config, and a lossy
        # repr round-trip of the args dict).
        check_fn = globals()[info['function']]
        availabilities[name] = check_fn(**info['args'])

    with open(filename, "w") as f:
        json.dump(availabilities, f)
Ejemplo n.º 4
0
def setup_train_dir(logdir):
    """Sets up a training directory under `logdir`.

    Serializes the in-memory CONFIG to `<logdir>/config.yml` when that file
    is missing; otherwise loads the existing config.yml and merges it into
    CONFIG. Finally creates the `train_logs` subdirectory.

    Args:
        logdir: Path of the training directory (created if missing).

    Raises:
        ValueError: If `train_logs` already exists and --force_train was not
            passed.
    """
    tf.io.gfile.makedirs(logdir)
    config_path = os.path.join(logdir, 'config.yml')
    # NOTE(review): os.path.exists only sees the local filesystem while the
    # writes go through tf.io.gfile — confirm logdir is always local.
    if not os.path.exists(config_path):
        logging.info(
            'Using config from config.py as no config.yml file exists in '
            '%s', logdir)
        with tf.io.gfile.GFile(config_path, 'w') as config_file:
            # C404 fix: build the dict directly with a comprehension instead
            # of dict() over a list of pairs.
            config = {k: to_dict(v) for k, v in CONFIG.items()}
            yaml.safe_dump(config, config_file, default_flow_style=False)
    else:
        logging.info('Using config from config.yml that exists in %s.', logdir)
        with tf.io.gfile.GFile(config_path, 'r') as config_file:
            config_dict = yaml.safe_load(config_file)
        CONFIG.update(config_dict)

    train_logs_dir = os.path.join(logdir, 'train_logs')
    if os.path.exists(train_logs_dir) and not FLAGS.force_train:
        raise ValueError(
            'You might be overwriting a directory that already '
            'has train_logs. Please provide a new logdir name in '
            'config or pass --force_train while launching script.')
    tf.io.gfile.makedirs(train_logs_dir)
Ejemplo n.º 5
0
        svc.setServiceParent(ret)

    # DO IT NOW
    ret.setServiceParent(service.IServiceCollection(application))

# Load the config; fall back to empty when no user config module exists.
try:
    from config import CONFIG  # pylint:disable=no-name-in-module,import-error
except ImportError:
    CONFIG = {}

# Merge user config over defaults
DEFAULT_CONFIG = {
    'docker_url': None,
    'bind_interface': '',
    'bind_port': 53,
    'bind_protocols': ['tcp', 'udp'],
    'no_nxdomain': True,
    'authoritive': True,
}
# Fix: dict(a.items() + b.items()) is Python-2-only — on Python 3 dict views
# don't support '+'. copy-then-update performs the identical merge (user
# CONFIG wins) on both versions.
_merged_config = DEFAULT_CONFIG.copy()
_merged_config.update(CONFIG)
CONFIG = _merged_config

application = service.Application('dnsserver', 1, 1)  # noqa pylint:disable=invalid-name
main()


# Doin' it wrong
if __name__ == '__main__':
    import sys
    # Fix: print() function call — the bare print statement is a
    # SyntaxError on Python 3; this form runs on both 2 and 3.
    print("Usage: twistd -y %s" % sys.argv[0])
Ejemplo n.º 6
0
def main():
    """Trains a DQN-style agent on the environment configured in `C`.

    Logs training/evaluation rewards to TensorBoard and the tabular logger,
    pickles the frames of the best-scoring evaluation episode to
    'video.pkl', and saves the final model to './<env_id>_model'.
    """
    logger.configure('./{}_logs'.format(C['env_id']))
    # Dump the full config so each run is reproducible from its logs.
    for k, v in C.items():
        logger.record_tabular(k, v)
    logger.dump_tabular()

    # Per-episode reward accumulators; the last element is the running
    # reward of the current (unfinished) episode.
    train_tracker = [0.0]
    eval_tracker = []
    best_reward = 0

    sess = tf.InteractiveSession()
    # Placeholders feed scalar reward summaries for TensorBoard (TF1 API).
    train_reward = tf.placeholder(tf.float32, name='train_reward')
    eval_reward = tf.placeholder(tf.float32, name='eval_reward')

    # Separate env instances so evaluation doesn't disturb training state.
    train_env = make_env(C['env_id'], C['noop_max'])
    eval_env = make_env(C['env_id'], C['noop_max'])
    agent = Agent(train_env, C)
    sess.run(tf.global_variables_initializer())
    # Sync target network weights with the online network before training.
    agent.nn.update_target()

    train_summary = tf.summary.scalar('train_rew', train_reward)
    eval_summary = tf.summary.scalar('eval_reward', eval_reward)
    writer = tf.summary.FileWriter('{}{}_summary'.format('./', C['env_id']),
                                   sess.graph)

    # reset_fs() presumably returns an empty frame-stack buffer that is
    # appended to each step — TODO confirm its capacity/semantics.
    train_fs = reset_fs()
    train_s = train_env.reset()
    for it in range(C['iterations']):
        # Training
        train_fs.append(train_s)
        # Transpose stacks frames channel-last, i.e. (H, W, stack).
        train_a = agent.act(np.transpose(train_fs, (1, 2, 0)))
        ns, train_r, train_d, _ = train_env.step(train_a)
        train_tracker[-1] += train_r
        agent.perceive(train_s, train_a, train_r, float(train_d), it)
        train_s = ns
        if train_d:
            # was_real_done distinguishes true game-over from per-life
            # episode ends. NOTE(review): the .env.env chain assumes a fixed
            # wrapper nesting — verify against make_env.
            if train_env.env.env.was_real_done:
                # Every 100 finished episodes, log the trailing-100 mean.
                if len(train_tracker) % 100 == 0:
                    summary = sess.run(train_summary,
                                       feed_dict={
                                           train_reward:
                                           np.mean(train_tracker[-100:])
                                       })
                    writer.add_summary(summary, it)
                    logger.record_tabular('steps', it)
                    logger.record_tabular('episode', len(train_tracker))
                    logger.record_tabular('epsilon', 100 * agent.epsilon)
                    logger.record_tabular('learning rate', agent.lr)
                    logger.record_tabular('mean 100 episodes',
                                          np.mean(train_tracker[-100:]))
                    logger.dump_tabular()
                # Open a fresh episode accumulator.
                train_tracker.append(0.0)
            train_fs = reset_fs()
            train_s = train_env.reset()

        # Evaluation
        if it % C['eval_freq'] == 0:
            for _ in range(C['eval_episodes']):
                temp_video = []  # raw frames of this eval episode
                temp_reward = 0  # NOTE(review): never updated; appears unused
                eval_tracker.append(0.0)
                eval_fs = reset_fs()
                eval_s = eval_env.reset()
                while True:
                    temp_video.append(eval_s)
                    eval_fs.append(eval_s)
                    # Greedy (no-exploration) policy during evaluation.
                    eval_a = agent.greedy_act(np.transpose(eval_fs, (1, 2, 0)))
                    eval_s, eval_r, eval_d, _ = eval_env.step(eval_a)
                    eval_tracker[-1] += eval_r

                    if eval_env.env.env.was_real_done:
                        break
                    if eval_d:
                        # Life lost but game not over: reset the frame stack
                        # and keep accumulating reward for this episode.
                        eval_fs = reset_fs()
                        eval_s = eval_env.reset()

                if eval_tracker[-1] > best_reward:  # Save best video
                    best_reward = eval_tracker[-1]
                    logger.log(
                        'Dump best video reward: {}'.format(best_reward))
                    best_video = temp_video
                    with open('video.pkl', 'wb') as f:
                        pickle.dump(best_video,
                                    f,
                                    protocol=pickle.HIGHEST_PROTOCOL)

            # Summarize the batch of eval episodes just played.
            logger.log(
                'Evaluate mean reward: {:.2f}, max reward: {:.2f}, std: {:.2f}'
                .format(np.mean(eval_tracker[-C['eval_episodes']:]),
                        np.max(eval_tracker[-C['eval_episodes']:]),
                        np.std(eval_tracker[-C['eval_episodes']:])))
            summary = sess.run(eval_summary,
                               feed_dict={
                                   eval_reward:
                                   np.mean(eval_tracker[-C['eval_episodes']:])
                               })
            writer.add_summary(summary, it)

    agent.nn.save('./{}_model'.format(C['env_id']))
Ejemplo n.º 7
0
        svc.setServiceParent(ret)

    # DO IT NOW
    ret.setServiceParent(service.IServiceCollection(application))


# Load the config; fall back to empty when no user config module exists.
try:
    from config import CONFIG  # pylint:disable=no-name-in-module,import-error
except ImportError:
    CONFIG = {}

# Merge user config over defaults
DEFAULT_CONFIG = {
    'docker_url': None,
    'bind_interface': '',
    'bind_port': 53,
    'bind_protocols': ['tcp', 'udp'],
    'no_nxdomain': True,
    'authoritive': True,
}
# Fix: dict(a.items() + b.items()) is Python-2-only — on Python 3 dict views
# don't support '+'. copy-then-update performs the identical merge (user
# CONFIG wins) on both versions.
_merged_config = DEFAULT_CONFIG.copy()
_merged_config.update(CONFIG)
CONFIG = _merged_config

application = service.Application('dnsserver', 1, 1)  # noqa pylint:disable=invalid-name
main()

# Doin' it wrong
if __name__ == '__main__':
    import sys
    # Fix: print() function call — the bare print statement is a
    # SyntaxError on Python 3; this form runs on both 2 and 3.
    print("Usage: twistd -y %s" % sys.argv[0])