Example #1
# Standard-library and Tornado imports used by this snippet; project-local
# modules (deployer_utils, deployment_manager, package_registrar,
# application_registrar, PackageRepoRestClient, Application) are assumed to be
# importable from the surrounding project.
import json
import logging
import sys

import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import define, options


def main():
    global config
    global dm

    with open('dm-config.json', 'r') as f:
        config = json.load(f)

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.getLevelName(config['config']['log_level']),
                        stream=sys.stderr)

    define("port", default=5000, help="run on the given port", type=int)
    tornado.options.parse_command_line()

    logging.info("Starting up...")

    deployer_utils.fill_hadoop_env(config['environment'])

    # Wire the deployment manager to the package repository REST client and the
    # HBase-backed package and application registrars.
    package_repository = PackageRepoRestClient(config['config']['package_repository'],
                                               config['config']['stage_root'])
    dm = deployment_manager.DeploymentManager(package_repository,
                                              package_registrar.HbasePackageRegistrar(
                                                  config['environment']['hbase_thrift_server'],
                                                  config['environment']['webhdfs_host'],
                                                  'hdfs',
                                                  config['environment']['webhdfs_port'],
                                                  config['config']['stage_root']),
                                              application_registrar.HbaseApplicationRegistrar(
                                                  config['environment']['hbase_thrift_server']),
                                              config['environment'],
                                              config['config'])

    # Serve the Tornado application on the configured port and start the IO loop.
    http_server = tornado.httpserver.HTTPServer(Application())
    http_server.listen(options.port)

    tornado.ioloop.IOLoop.instance().start()
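
These snippets all read the same dm-config.json file. As a rough, hypothetical sketch assembled only from the dictionary keys accessed above (every value is an illustrative placeholder, and deployer_utils.fill_hadoop_env may add further environment entries at runtime), the file's contents, expressed as a Python dict, might look like:

EXAMPLE_DM_CONFIG = {
    "config": {
        "log_level": "INFO",
        "stage_root": "/tmp/stage",
        "package_repository": "http://package-repository:8888"
    },
    "environment": {
        "hbase_thrift_server": "hbase-master",
        "webhdfs_host": "hdfs-namenode",
        "webhdfs_port": "14000",
        "yarn_resource_manager_host": "yarn-rm",
        "yarn_resource_manager_port": "8088",
        "oozie_uri": "http://oozie:11000/oozie"
    }
}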
Example #2
def main():
    """
    Poll the registered applications and rebuild their summaries in a loop.
    _MAX_TIME_BOUND and application_summary() are assumed to be defined at
    module level elsewhere in the surrounding project.
    """
    global CONFIG
    global YARN_RESOURCE_MANAGER
    global YARN_PORT
    global OOZIE_URI
    global _APPLICATION_REGISTRAR
    global _HBASE

    with open('dm-config.json', 'r') as conf:
        CONFIG = json.load(conf)

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.getLevelName(
                            CONFIG['config']['log_level']),
                        stream=sys.stderr)
    logging.info('Starting... Building actual status for applications')
    deployer_utils.fill_hadoop_env(CONFIG['environment'], CONFIG['config'])
    _APPLICATION_REGISTRAR = application_registrar.HbaseApplicationRegistrar(
        CONFIG['environment']['hbase_thrift_server'])
    _HBASE = application_summary_registrar.HBaseAppplicationSummary(
        CONFIG['environment']['hbase_thrift_server'])
    YARN_RESOURCE_MANAGER = CONFIG['environment']['yarn_resource_manager_host']
    YARN_PORT = CONFIG['environment']['yarn_resource_manager_port']
    OOZIE_URI = CONFIG['environment']['oozie_uri']
    prev_run_app_list = []
    while True:
        start_time = time.time()
        app_list = _APPLICATION_REGISTRAR.list_applications()
        application_summary(app_list)
        prev_run_app_list = app_list
        # Sleep only for whatever is left of the time bound for this round
        # (clamped to zero so a slow round never produces a negative sleep).
        wait_time = max(0, _MAX_TIME_BOUND - int(time.time() - start_time))
        # Re-read the registrar; if the application list changed while the
        # summaries were being built, start the next round immediately.
        app_list = _APPLICATION_REGISTRAR.list_applications()
        if set(app_list) == set(prev_run_app_list):
            time.sleep(wait_time)

Example #3
def main():
    """
    Generate a detailed application summary once every SUMMARY_INTERVAL seconds.
    """
    with open('dm-config.json', 'r') as con:
        config = json.load(con)

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.getLevelName(
                            config['config']['log_level']),
                        stream=sys.stderr)

    deployer_utils.fill_hadoop_env(config['environment'], config['config'])

    summary = ApplicationDetailedSummary(config['environment'],
                                         config['config'])

    logging.info('Starting... Building actual status for applications')

    while True:
        # Make sure a new summary round is initiated every SUMMARY_INTERVAL (30) seconds.
        start_time_on_cur_round = milli_time()

        summary.generate()

        # milli_time() returns milliseconds, so divide by 1000 to get seconds.
        elapsed_on_cur_round = (milli_time() -
                                start_time_on_cur_round) / 1000.0
        logging.info("Finished generating summary, time taken %s seconds",
                     elapsed_on_cur_round)

        # Sleep only for the time remaining in the current round, if any.
        if elapsed_on_cur_round < SUMMARY_INTERVAL:
            time.sleep(SUMMARY_INTERVAL - elapsed_on_cur_round)
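
This last snippet relies on a module-level milli_time() helper and a SUMMARY_INTERVAL constant defined elsewhere in the surrounding project. A minimal sketch of plausible definitions, assuming milli_time() returns wall-clock time in milliseconds (implied by the /1000.0 conversion above) and the 30-second interval the loop comment mentions, would be:

import time

SUMMARY_INTERVAL = 30  # assumed: seconds between summary rounds


def milli_time():
    # Hypothetical helper: current wall-clock time in whole milliseconds.
    return int(round(time.time() * 1000))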