Example #1
0
def main():
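    """ Sets up ZooKeeper and configuration, then starts the AppManager. """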
    file_io.set_logging_format()
    logging.getLogger().setLevel(logging.INFO)

    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()

    deployment_config = DeploymentConfig(zk_client)
    projects_manager = GlobalProjectsManager(zk_client)
    thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
    source_manager = SourceManager(zk_client, thread_pool)
    source_manager.configure_automatic_fetch(projects_manager)
    monit_operator = MonitOperator()

    options.define('private_ip', appscale_info.get_private_ip())
    options.define('syslog_server', appscale_info.get_headnode_ip())
    options.define('db_proxy', appscale_info.get_db_proxy())
    options.define('tq_proxy', appscale_info.get_tq_proxy())
    options.define('secret', appscale_info.get_secret())

    routing_client = RoutingClient(zk_client, options.private_ip,
                                   options.secret)
    instance_manager = InstanceManager(zk_client, monit_operator,
                                       routing_client, projects_manager,
                                       deployment_config, source_manager,
                                       options.syslog_server, thread_pool,
                                       options.private_ip)
    instance_manager.start()

    logger.info('Starting AppManager')

    io_loop = IOLoop.current()
    io_loop.run_sync(instance_manager.populate_api_servers)
    io_loop.start()
Example #2
0
def deploy_apps(app_paths):
    """ Deploys all apps that reside in /opt/appscale/apps.

    Args:
      app_paths: A list of the full paths of the apps to be deployed.
    Returns:
      True on success, False otherwise.
    """
    secret = appscale_info.get_secret()
    acc = AppControllerClient(appscale_info.get_headnode_ip(), secret)

    # Wait for Cassandra to come up after a restore.
    time.sleep(15)

    for app_path in app_paths:
        # Extract app ID.
        app_id = app_path[app_path.rfind('/') + 1:app_path.find('.')]
        if not app_id:
            logging.error(
                "Malformed source code archive. Cannot complete "
                "application recovery for '{}'. Aborting...".format(app_path))
            return False

        file_suffix = re.search(r"\.(.*)\Z", app_path).group(1)

        logging.warning("Restoring app '{}', from '{}'".format(
            app_id, app_path))

        acc.upload_app(app_path, file_suffix)

    return True
Example #3
0
def deploy_apps(app_paths):
  """ Deploys all apps that reside in /opt/appscale/apps.

  Args:
    app_paths: A list of the full paths of the apps to be deployed.
  Returns:
    True on success, False otherwise.
  """
  secret = appscale_info.get_secret()
  acc = AppControllerClient(appscale_info.get_headnode_ip(), secret)

  # Wait for Cassandra to come up after a restore.
  time.sleep(15)

  for app_path in app_paths:
    # Extract app ID.
    app_id = app_path[app_path.rfind('/')+1:app_path.find('.')]
    if not app_id:
      logging.error("Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False

    file_suffix = re.search(r"\.(.*)\Z", app_path).group(1)

    logging.warning("Restoring app '{}', from '{}'".format(app_id, app_path))

    acc.upload_app(app_path, file_suffix)

  return True
Example #4
0
def main():
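  """ Sets up ZooKeeper and configuration, then starts the AppManager. """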
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()

  deployment_config = DeploymentConfig(zk_client)
  projects_manager = GlobalProjectsManager(zk_client)
  thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  source_manager = SourceManager(zk_client, thread_pool)
  source_manager.configure_automatic_fetch(projects_manager)
  monit_operator = MonitOperator()

  options.define('private_ip', appscale_info.get_private_ip())
  options.define('syslog_server', appscale_info.get_headnode_ip())
  options.define('db_proxy', appscale_info.get_db_proxy())
  options.define('load_balancer_ip', appscale_info.get_load_balancer_ips()[0])
  options.define('tq_proxy', appscale_info.get_tq_proxy())
  options.define('secret', appscale_info.get_secret())

  routing_client = RoutingClient(zk_client, options.private_ip, options.secret)
  instance_manager = InstanceManager(
    zk_client, monit_operator, routing_client, projects_manager,
    deployment_config, source_manager, options.syslog_server, thread_pool,
    options.private_ip)
  instance_manager.start()

  logger.info('Starting AppManager')

  io_loop = IOLoop.current()
  io_loop.run_sync(instance_manager.populate_api_servers)
  io_loop.start()
Example #5
0
def main():
    """ Main. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    options.define('secret', appscale_info.get_secret())

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    my_ip = appscale_info.get_private_ip()
    is_master = (my_ip == appscale_info.get_headnode_ip())
    is_lb = (my_ip in appscale_info.get_load_balancer_ips())
    is_tq = (my_ip in appscale_info.get_taskqueue_nodes())
    is_db = (my_ip in appscale_info.get_db_ips())

    if is_master:
        global zk_client
        zk_client = KazooClient(hosts=','.join(
            appscale_info.get_zk_node_ips()),
                                connection_retry=ZK_PERSISTENT_RECONNECTS)
        zk_client.start()
        # Start watching profiling configs in ZooKeeper
        stats_app.ProfilingManager(zk_client)

    app = tornado.web.Application(
        stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db) +
        stats_app.get_cluster_stats_api_routes(is_master),
        debug=False)
    app.listen(constants.HERMES_PORT)

    # Start loop for accepting http requests.
    IOLoop.instance().start()

    logger.info("Hermes is up and listening on port: {}.".format(
        constants.HERMES_PORT))
Example #6
0
def main():
  """ Main. """
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')
  args = parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  my_ip = appscale_info.get_private_ip()
  is_master = (my_ip == appscale_info.get_headnode_ip())
  is_lb = (my_ip in appscale_info.get_load_balancer_ips())
  is_tq = (my_ip in appscale_info.get_taskqueue_nodes())
  is_db = (my_ip in appscale_info.get_db_ips())

  if is_master:
    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

  app = tornado.web.Application(
    stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db)
    + stats_app.get_cluster_stats_api_routes(is_master),
    debug=False
  )
  app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logger.info("Hermes is up and listening on port: {}."
               .format(constants.HERMES_PORT))
Example #7
0
def main():
    """ Main. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    parser.add_argument('--port',
                        type=int,
                        default=constants.HERMES_PORT,
                        help='The port to listen on')
    args = parser.parse_args()

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    my_ip = appscale_info.get_private_ip()
    is_master = (my_ip == appscale_info.get_headnode_ip())
    is_lb = (my_ip in appscale_info.get_load_balancer_ips())
    is_tq = (my_ip in appscale_info.get_taskqueue_nodes())
    is_db = (my_ip in appscale_info.get_db_ips())

    app = web.Application(middlewares=[verify_secret_middleware])

    route_items = []
    route_items += get_local_stats_api_routes(is_lb, is_tq, is_db)
    route_items += get_cluster_stats_api_routes(is_master)
    for route, handler in route_items:
        app.router.add_get(route, handler)

    logger.info("Starting Hermes on port: {}.".format(args.port))
    web.run_app(app,
                port=args.port,
                access_log=logger,
                access_log_format='%a "%r" %s %bB %Tfs "%{User-Agent}i"')
Example #8
0
if __name__ == "__main__":
    file_io.set_logging_format()
    logging.getLogger().setLevel(logging.INFO)

    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()

    deployment_config = DeploymentConfig(zk_client)
    projects_manager = GlobalProjectsManager(zk_client)
    thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
    source_manager = SourceManager(zk_client, thread_pool)
    source_manager.configure_automatic_fetch(projects_manager)

    options.define('private_ip', appscale_info.get_private_ip())
    options.define('syslog_server', appscale_info.get_headnode_ip())
    options.define('db_proxy', appscale_info.get_db_proxy())
    options.define('tq_proxy', appscale_info.get_tq_proxy())
    options.define('secret', appscale_info.get_secret())

    running_instances = recover_state(zk_client)
    PeriodicCallback(stop_failed_instances,
                     INSTANCE_CLEANUP_INTERVAL * 1000).start()

    app = tornado.web.Application([('/versions/([a-z0-9-_]+)', VersionHandler),
                                   ('/versions/([a-z0-9-_]+)/([0-9-]+)',
                                    InstanceHandler)])

    app.listen(constants.APP_MANAGER_PORT)
    logging.info('Starting AppManager on {}'.format(
        constants.APP_MANAGER_PORT))
Example #9
0
def main():
    """ Main. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    options.define('secret', appscale_info.get_secret())

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    my_ip = appscale_info.get_private_ip()
    is_master = (my_ip == appscale_info.get_headnode_ip())
    is_lb = (my_ip in appscale_info.get_load_balancer_ips())
    is_tq = (my_ip in appscale_info.get_taskqueue_nodes())

    if is_master:
        # Periodically check with the portal for new tasks.
        # Note: Currently, any active handlers from the tornado app will block
        # polling until they complete.
        PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

        # Only master Hermes node handles /do_task route
        task_route = ('/do_task', TaskHandler)

        global zk_client
        zk_client = KazooClient(hosts=','.join(
            appscale_info.get_zk_node_ips()),
                                connection_retry=ZK_PERSISTENT_RECONNECTS)
        zk_client.start()
        # Start watching profiling configs in ZooKeeper
        stats_app.ProfilingManager(zk_client)

        # Periodically checks if the deployment is registered and uploads the
        # appscalesensor app for registered deployments.
        sensor_deployer = SensorDeployer(zk_client)
        PeriodicCallback(sensor_deployer.deploy,
                         constants.UPLOAD_SENSOR_INTERVAL).start()
    else:
        task_route = (
            '/do_task', Respond404Handler,
            dict(reason='Hermes slaves do not manage tasks from Portal'))

    app = tornado.web.Application(
        [
            ("/", MainHandler),
            task_route,
        ] + stats_app.get_local_stats_api_routes(is_lb, is_tq) +
        stats_app.get_cluster_stats_api_routes(is_master),
        debug=False)
    app.listen(constants.HERMES_PORT)

    # Start loop for accepting http requests.
    IOLoop.instance().start()

    logging.info("Hermes is up and listening on port: {}.".format(
        constants.HERMES_PORT))
Example #10
0
def main():
  """ Main. """
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')
  args = parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  my_ip = appscale_info.get_private_ip()
  is_master = (my_ip == appscale_info.get_headnode_ip())
  is_lb = (my_ip in appscale_info.get_load_balancer_ips())
  is_tq = (my_ip in appscale_info.get_taskqueue_nodes())

  if is_master:
    # Periodically check with the portal for new tasks.
    # Note: Currently, any active handlers from the tornado app will block
    # polling until they complete.
    PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

    # Only master Hermes node handles /do_task route
    task_route = ('/do_task', TaskHandler)

    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

    # Periodically checks if the deployment is registered and uploads the
    # appscalesensor app for registered deployments.
    sensor_deployer = SensorDeployer(zk_client)
    PeriodicCallback(sensor_deployer.deploy,
                     constants.UPLOAD_SENSOR_INTERVAL).start()
  else:
    task_route = ('/do_task', Respond404Handler,
                  dict(reason='Hermes slaves do not manage tasks from Portal'))

  app = tornado.web.Application([
      ("/", MainHandler),
      task_route,
    ]
    + stats_app.get_local_stats_api_routes(is_lb, is_tq)
    + stats_app.get_cluster_stats_api_routes(is_master),
    debug=False
  )
  app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logging.info("Hermes is up and listening on port: {}."
               .format(constants.HERMES_PORT))
Example #11
0
################################
if __name__ == "__main__":
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()

  deployment_config = DeploymentConfig(zk_client)
  projects_manager = GlobalProjectsManager(zk_client)
  thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  source_manager = SourceManager(zk_client, thread_pool)

  options.define('private_ip', appscale_info.get_private_ip())
  options.define('syslog_server', appscale_info.get_headnode_ip())
  options.define('db_proxy', appscale_info.get_db_proxy())
  options.define('tq_proxy', appscale_info.get_tq_proxy())

  app = tornado.web.Application([
    ('/versions/([a-z0-9-_]+)', VersionHandler),
    ('/versions/([a-z0-9-_]+)/([0-9-]+)', InstanceHandler)
  ])

  app.listen(constants.APP_MANAGER_PORT)
  logging.info('Starting AppManager on {}'.format(constants.APP_MANAGER_PORT))

  io_loop = IOLoop.current()
  io_loop.run_sync(populate_api_servers)
  io_loop.start()