def deploy_apps(app_paths):
  """ Deploys all apps that reside in /opt/appscale/apps.

  Args:
    app_paths: A list of the full paths of the apps to be deployed.
  Returns:
    True on success, False otherwise.
  """
  secret = appscale_info.get_secret()
  acc = AppControllerClient(appscale_info.get_headnode_ip(), secret)

  # Wait for Cassandra to come up after a restore.
  time.sleep(15)

  for app_path in app_paths:
    # Extract app ID: the text between the last '/' and the first '.'.
    app_id = app_path[app_path.rfind('/') + 1:app_path.find('.')]
    if not app_id:
      logging.error(
        "Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False

    # Raw string avoids invalid escape sequences ('\.', '\Z') being
    # interpreted by Python before they reach the regex engine.
    suffix_match = re.search(r"\.(.*)\Z", app_path)
    if suffix_match is None:
      # Previously this crashed with AttributeError when the archive name
      # had no extension; fail gracefully instead.
      logging.error(
        "Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False
    file_suffix = suffix_match.group(1)

    logging.warning("Restoring app '{}', from '{}'".format(
      app_id, app_path))
    acc.upload_app(app_path, file_suffix)

  return True
def main(): shutdown.install_signal_handlers() # The timezone must be set in the devappserver2 process rather than just in # the runtime so printed log timestamps are consistent and the taskqueue stub # expects the timezone to be UTC. The runtime inherits the environment. os.environ['TZ'] = 'UTC' if hasattr(time, 'tzset'): # time.tzet() should be called on Unix, but doesn't exist on Windows. time.tzset() options = PARSER.parse_args() os.environ['MY_IP_ADDRESS'] = options.host os.environ['MY_PORT'] = str(options.port) os.environ['COOKIE_SECRET'] = appscale_info.get_secret() os.environ['NGINX_HOST'] = options.nginx_host if options.pidfile: with open(options.pidfile, 'w') as pidfile: pidfile.write(str(os.getpid())) dev_server = DevelopmentServer() try: dev_server.start(options) shutdown.wait_until_shutdown() finally: dev_server.stop()
def main():
  """ Starts the BlobServer and registers it with each load balancer.

  Side effects: sets the module-level datastore_path and deployment_config,
  then blocks on the tornado IOLoop.
  """
  global datastore_path
  global deployment_config

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  # Fix: the flag previously declared both default=DEFAULT_PORT and
  # required=True; 'required' made the default dead code. Making the flag
  # optional lets the documented default apply, and existing invocations
  # that pass -p keep working unchanged.
  parser.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                      help="The blobstore server's port")
  parser.add_argument('-d', '--datastore-path', required=True,
                      help='The location of the datastore server')
  args = parser.parse_args()

  datastore_path = args.datastore_path
  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()
  deployment_config = DeploymentConfig(zk_client)
  setup_env()

  http_server = tornado.httpserver.HTTPServer(
    Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE, xheaders=True)
  http_server.listen(args.port)

  # Make sure this server is accessible from each of the load balancers.
  secret = appscale_info.get_secret()
  for load_balancer in appscale_info.get_load_balancer_ips():
    acc = AppControllerClient(load_balancer, secret)
    acc.add_routing_for_blob_server()

  logger.info('Starting BlobServer on {}'.format(args.port))
  tornado.ioloop.IOLoop.instance().start()
def main():
  """ Parses CLI arguments and runs the InfrastructureManager server. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('--autoscaler', action='store_true',
                          help='Ability to start/terminate instances.')
  arg_parser.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                          help='The port to listen on')
  arg_parser.add_argument('-v', '--verbose', action='store_true',
                          help='Output debug-level logging')
  parsed = arg_parser.parse_args()

  if parsed.verbose:
    logger.setLevel(logging.DEBUG)

  server = make_app(appscale_info.get_secret(), parsed.autoscaler)

  logger.info('Starting InfrastructureManager')
  server.listen(parsed.port)
  IOLoop.current().start()
def get_task_headers(self, request):
  """ Builds the HTTP headers used for a task web request.

  Args:
    request: A taskqueue_service_pb.TaskQueueAddRequest
  Returns:
    A dictionary of key/values for a web request.
  """
  headers = {header.key(): header.value()
             for header in request.header_list()}

  eta = self.__when_to_run(request)

  # This header is how we authenticate that it's an internal request
  secret = appscale_info.get_secret()
  secret_hash = hashlib.sha1(request.app_id() + '/' + secret).hexdigest()

  headers.update({
    'X-AppEngine-Fake-Is-Admin': secret_hash,
    'X-AppEngine-QueueName': request.queue_name(),
    'X-AppEngine-TaskName': request.task_name(),
    'X-AppEngine-TaskRetryCount': '0',
    'X-AppEngine-TaskExecutionCount': '0',
    'X-AppEngine-TaskETA': str(int(eta.strftime("%s"))),
  })
  return headers
def get_access_token(self, scopes, service_account_id=None,
                     service_account_name=None):
  """ Generates an access token from a service account.

  Args:
    scopes: A list of strings specifying scopes.
    service_account_id: An integer specifying a service account ID.
    service_account_name: A string specifying a service account name.
  Returns:
    An AccessToken.
  Raises:
    UnknownError if the service account is not configured.
  """
  # TODO: Check if it makes sense to store the audience with the service
  # account definition.
  default_audience = 'https://www.googleapis.com/oauth2/v4/token'

  # Path 1: no account name given, or the name matches this server's own
  # key — delegate token generation to a load balancer's /oauth/token
  # endpoint over HTTPS.
  if (service_account_name is None or
      (self._key is not None and
       self._key.key_name == service_account_name)):
    lb_ip = random.choice(appscale_info.get_load_balancer_ips())
    url = 'https://{}:17441/oauth/token'.format(lb_ip)
    payload = urllib.urlencode({'scope': ' '.join(scopes),
                                'grant_type': 'secret',
                                'project_id': self.project_id,
                                'secret': appscale_info.get_secret()})
    try:
      # NOTE(review): certificate verification is disabled here —
      # presumably because deployments use self-signed certs; confirm.
      response = urllib2.urlopen(
        url, payload, context=ssl._create_unverified_context())
    except urllib2.HTTPError as error:
      raise UnknownError(error.msg)
    except urllib2.URLError as error:
      raise UnknownError(error.reason)

    token_details = json.loads(response.read())
    expiration_time = int(time.time()) + token_details['expires_in']
    return AccessToken(token_details['access_token'], expiration_time)

  # Lookups by numeric ID are not supported below; only named accounts
  # stored in ZooKeeper can be resolved.
  if service_account_id is not None:
    raise UnknownError(
      '{} is not configured'.format(service_account_id))

  # Path 2: load the named account's key material from ZooKeeper.
  service_account_node = '/'.join([self._service_accounts_node,
                                   service_account_name])
  try:
    account_details = self._zk_client.get(service_account_node)[0]
  except NoNodeError:
    raise UnknownError(
      '{} is not configured'.format(service_account_name))

  try:
    account_details = json.loads(account_details)
  except ValueError:
    raise UnknownError(
      '{} has invalid data'.format(service_account_node))

  # Sign a JWT assertion with the stored private key and exchange it for
  # an access token.
  pem = account_details['privateKey'].encode('utf-8')
  key = PrivateKey.from_pem(service_account_name, pem)
  assertion = key.generate_assertion(default_audience, scopes)
  return self._get_token(default_audience, assertion)
def deploy_apps(app_paths):
  """ Deploys all apps that reside in /opt/appscale/apps.

  Args:
    app_paths: A list of the full paths of the apps to be deployed.
  Returns:
    True on success, False otherwise.
  """
  secret = appscale_info.get_secret()
  acc = AppControllerClient(appscale_info.get_headnode_ip(), secret)

  # Wait for Cassandra to come up after a restore.
  time.sleep(15)

  for app_path in app_paths:
    # Extract app ID: the text between the last '/' and the first '.'.
    app_id = app_path[app_path.rfind('/')+1:app_path.find('.')]
    if not app_id:
      logging.error("Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False

    # Raw string avoids invalid escape sequences ('\.', '\Z') being
    # interpreted by Python before they reach the regex engine.
    suffix_match = re.search(r"\.(.*)\Z", app_path)
    if suffix_match is None:
      # Previously this crashed with AttributeError when the archive name
      # had no extension; fail gracefully instead.
      logging.error("Malformed source code archive. Cannot complete "
        "application recovery for '{}'. Aborting...".format(app_path))
      return False
    file_suffix = suffix_match.group(1)

    logging.warning("Restoring app '{}', from '{}'".format(app_id, app_path))
    acc.upload_app(app_path, file_suffix)

  return True
def main():
  """ Wires up dependencies and runs the AppManager service. """
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()))
  zk_client.start()

  deployment_config = DeploymentConfig(zk_client)
  projects_manager = GlobalProjectsManager(zk_client)
  thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  source_manager = SourceManager(zk_client, thread_pool)
  source_manager.configure_automatic_fetch(projects_manager)
  monit_operator = MonitOperator()

  # Global options shared by the handlers.
  for name, value in [
      ('private_ip', appscale_info.get_private_ip()),
      ('syslog_server', appscale_info.get_headnode_ip()),
      ('db_proxy', appscale_info.get_db_proxy()),
      ('tq_proxy', appscale_info.get_tq_proxy()),
      ('secret', appscale_info.get_secret())]:
    options.define(name, value)

  routing_client = RoutingClient(zk_client, options.private_ip,
                                 options.secret)
  instance_manager = InstanceManager(
    zk_client, monit_operator, routing_client, projects_manager,
    deployment_config, source_manager, options.syslog_server, thread_pool,
    options.private_ip)
  instance_manager.start()

  logger.info('Starting AppManager')

  io_loop = IOLoop.current()
  io_loop.run_sync(instance_manager.populate_api_servers)
  io_loop.start()
def main():
  """ Configures and launches the AppManager server process. """
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_hosts = ','.join(appscale_info.get_zk_node_ips())
  zk_client = KazooClient(hosts=zk_hosts)
  zk_client.start()

  config = DeploymentConfig(zk_client)
  all_projects = GlobalProjectsManager(zk_client)
  workers = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  sources = SourceManager(zk_client, workers)
  sources.configure_automatic_fetch(all_projects)
  monit = MonitOperator()

  # Global options shared by the handlers.
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('syslog_server', appscale_info.get_headnode_ip())
  options.define('db_proxy', appscale_info.get_db_proxy())
  options.define('load_balancer_ip', appscale_info.get_load_balancer_ips()[0])
  options.define('tq_proxy', appscale_info.get_tq_proxy())
  options.define('secret', appscale_info.get_secret())

  router = RoutingClient(zk_client, options.private_ip, options.secret)
  manager = InstanceManager(
    zk_client, monit, router, all_projects, config, sources,
    options.syslog_server, workers, options.private_ip)
  manager.start()

  logger.info('Starting AppManager')

  main_loop = IOLoop.current()
  main_loop.run_sync(manager.populate_api_servers)
  main_loop.start()
def main():
  """ Entry point: configure resources and run the AdminServer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  cli = argparse.ArgumentParser()
  cli.add_argument('-p', '--port', type=int, default=constants.DEFAULT_PORT,
                   help='The port to listen on')
  cli.add_argument('-v', '--verbose', action='store_true',
                   help='Output debug-level logging')
  parsed = cli.parse_args()

  if parsed.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)

  zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                          connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)

  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  all_resources = {'acc': acc, 'ua_client': ua_client,
                   'zk_client': zk_client,
                   'version_update_lock': version_update_lock,
                   'thread_pool': thread_pool}

  # Only task queue nodes manage push workers.
  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logging.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  routes = [
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions',
     VersionsHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
     VersionHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)', OperationsHandler),
    ('/api/queue/update', UpdateQueuesHandler, {'zk_client': zk_client}),
  ]
  app = web.Application(routes)

  logging.info('Starting AdminServer')
  app.listen(parsed.port)
  IOLoop.current().start()
def update_secret(revision_key):
  """ Ensures the revision's secret matches the deployment secret.

  Args:
    revision_key: A string identifying the unpacked revision.
  """
  deployment_secret = get_secret()
  secret_module = os.path.join(
    UNPACK_ROOT, revision_key, 'app', 'lib', 'secret_key.py')

  with open(secret_module) as secret_file:
    # The module's last whitespace-separated token is the quoted secret;
    # strip the surrounding quote characters.
    revision_secret = secret_file.read().split()[-1][1:-1]

  if revision_secret != deployment_secret:
    with open(secret_module, 'w') as secret_file:
      secret_file.write("GLOBAL_SECRET_KEY = '{}'".format(deployment_secret))
async def verify_secret_middleware(request, handler):
  """ Security middleware for secret verification.

  Args:
    request: an instance of Request.
    handler: a callable handler for further request processing.
  Return:
    403 Response if secret is incorrect,
    Response provided by handler otherwise.
  """
  if request.headers.get(SECRET_HEADER) != appscale_info.get_secret():
    # Fix: logger.warn is a deprecated alias for logger.warning.
    logger.warning(
      "Received bad secret from {client}".format(client=request.remote))
    return web.Response(status=http.HTTPStatus.FORBIDDEN,
                        reason="Bad secret")
  return await handler(request)
def main():
  """ Parses options, sets up signal handling, and runs the Hermes stats
  server. """
  cli = argparse.ArgumentParser()
  cli.add_argument('-v', '--verbose', action='store_true',
                   help='Output debug-level logging')
  parsed = cli.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if parsed.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  # Determine this node's roles in the deployment.
  node_ip = appscale_info.get_private_ip()
  is_master = node_ip == appscale_info.get_headnode_ip()
  is_lb = node_ip in appscale_info.get_load_balancer_ips()
  is_tq = node_ip in appscale_info.get_taskqueue_nodes()
  is_db = node_ip in appscale_info.get_db_ips()

  if is_master:
    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

  routes = (stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db)
            + stats_app.get_cluster_stats_api_routes(is_master))
  app = tornado.web.Application(routes, debug=False)
  app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logger.info("Hermes is up and listening on port: {}.".format(
    constants.HERMES_PORT))
def main():
  """ Launches the Hermes statistics server for this node's roles. """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('-v', '--verbose', action='store_true',
                          help='Output debug-level logging')
  args = arg_parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  for sig in (signal.SIGTERM, signal.SIGINT):
    signal.signal(sig, signal_handler)

  # Role detection: master/load-balancer/taskqueue/database membership
  # decides which API routes this node exposes.
  private_ip = appscale_info.get_private_ip()
  is_master = private_ip == appscale_info.get_headnode_ip()
  is_lb = private_ip in appscale_info.get_load_balancer_ips()
  is_tq = private_ip in appscale_info.get_taskqueue_nodes()
  is_db = private_ip in appscale_info.get_db_ips()

  if is_master:
    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

  handlers = stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db)
  handlers += stats_app.get_cluster_stats_api_routes(is_master)

  server_app = tornado.web.Application(handlers, debug=False)
  server_app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logger.info("Hermes is up and listening on port: {}."
              .format(constants.HERMES_PORT))
async def _fetch_remote_stats_async(self, node_ip, max_age, include_lists): """ Fetches statistics from a single remote node. Args: node_ip: a string - remote node IP. max_age: An int - max age of cached snapshot to use (in seconds). include_lists: An instance of IncludeLists. Returns: An instance of stats snapshot. """ # Security header headers = {constants.SECRET_HEADER: appscale_info.get_secret()} # Build query arguments arguments = {} if include_lists is not None: arguments['include_lists'] = include_lists.asdict() if max_age is not None: arguments['max_age'] = max_age url = "http://{ip}:{port}/{path}".format( ip=node_ip, port=constants.HERMES_PORT, path=self.method_path) try: async with aiohttp.ClientSession() as session: awaitable_get = session.get( url, headers=headers, json=arguments, timeout=constants.REMOTE_REQUEST_TIMEOUT ) async with awaitable_get as resp: if resp.status >= 400: err_message = 'HTTP {}: {}'.format(resp.status, resp.reason) resp_text = await resp.text() if resp_text: err_message += '. {}'.format(resp_text) logger.error("Failed to get {} ({})".format(url, err_message)) raise RemoteHermesError(err_message) snapshot = await resp.json(content_type=None) return converter.stats_from_dict(self.stats_model, snapshot) except aiohttp.ClientError as err: logger.error("Failed to get {} ({})".format(url, err)) raise RemoteHermesError(str(err))
def deploy_sensor_app():
  """ Uploads the sensor app for registered deployments. """
  deployment_id = helper.get_deployment_id()
  # If deployment is not registered, then do nothing.
  if not deployment_id:
    return

  secret = appscale_info.get_secret()
  ua_client = UAClient(appscale_info.get_db_master_ip(), secret)

  # If the appscalesensor app is already running, then do nothing.
  if ua_client.is_app_enabled(hermes_constants.APPSCALE_SENSOR):
    return

  pwd = appscale_utils.encrypt_password(
    hermes_constants.USER_EMAIL,
    appscale_utils.random_password_generator())
  if create_appscale_user(pwd, ua_client) and create_xmpp_user(
      pwd, ua_client):
    logging.debug("Created new user and now tarring app to be deployed.")
    file_path = os.path.join(os.path.dirname(__file__), '../Apps/sensor')
    app_dir_location = os.path.join(hermes_constants.APP_DIR_LOCATION,
                                    hermes_constants.APPSCALE_SENSOR)
    # Fix: use a context manager so the archive handle is closed even if
    # add() raises; previously the file descriptor leaked on error.
    with tarfile.open(app_dir_location, "w|gz") as archive:
      archive.add(file_path, arcname=hermes_constants.APPSCALE_SENSOR)

    try:
      logging.info(
        "Deploying the sensor app for registered deployments.")
      acc = appscale_info.get_appcontroller_client()
      acc.upload_app(app_dir_location, hermes_constants.FILE_SUFFIX,
                     hermes_constants.USER_EMAIL)
    except AppControllerException:
      logging.exception("AppControllerException while trying to deploy "
                        "appscalesensor app.")
  else:
    logging.error("Error while creating or accessing the user to deploy "
                  "appscalesensor app.")
def main():
  """ Runs the Hermes server; the master node additionally polls the portal
  for tasks and deploys the sensor app. """
  cli = argparse.ArgumentParser()
  cli.add_argument('-v', '--verbose', action='store_true',
                   help='Output debug-level logging')
  parsed = cli.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if parsed.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  # Determine this node's roles in the deployment.
  node_ip = appscale_info.get_private_ip()
  is_master = node_ip == appscale_info.get_headnode_ip()
  is_lb = node_ip in appscale_info.get_load_balancer_ips()
  is_tq = node_ip in appscale_info.get_taskqueue_nodes()

  if is_master:
    # Periodically check with the portal for new tasks.
    # Note: Currently, any active handlers from the tornado app will block
    # polling until they complete.
    PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

    # Only master Hermes node handles /do_task route
    task_route = ('/do_task', TaskHandler)

    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

    # Periodically checks if the deployment is registered and uploads the
    # appscalesensor app for registered deployments.
    sensor_deployer = SensorDeployer(zk_client)
    PeriodicCallback(sensor_deployer.deploy,
                     constants.UPLOAD_SENSOR_INTERVAL).start()
  else:
    task_route = (
      '/do_task', Respond404Handler,
      dict(reason='Hermes slaves do not manage tasks from Portal'))

  routes = [("/", MainHandler), task_route]
  routes += stats_app.get_local_stats_api_routes(is_lb, is_tq)
  routes += stats_app.get_cluster_stats_api_routes(is_master)

  app = tornado.web.Application(routes, debug=False)
  app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logging.info("Hermes is up and listening on port: {}.".format(
    constants.HERMES_PORT))
def main():
  """ Hermes entry point: master nodes poll the portal and deploy the sensor
  app; all nodes serve stats routes. """
  arg_parser = argparse.ArgumentParser()
  arg_parser.add_argument('-v', '--verbose', action='store_true',
                          help='Output debug-level logging')
  args = arg_parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  for sig in (signal.SIGTERM, signal.SIGINT):
    signal.signal(sig, signal_handler)

  private_ip = appscale_info.get_private_ip()
  is_master = private_ip == appscale_info.get_headnode_ip()
  is_lb = private_ip in appscale_info.get_load_balancer_ips()
  is_tq = private_ip in appscale_info.get_taskqueue_nodes()

  if not is_master:
    task_route = ('/do_task', Respond404Handler,
                  dict(reason='Hermes slaves do not manage tasks from Portal'))
  else:
    # Periodically check with the portal for new tasks.
    # Note: Currently, any active handlers from the tornado app will block
    # polling until they complete.
    PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

    # Only master Hermes node handles /do_task route
    task_route = ('/do_task', TaskHandler)

    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

    # Periodically checks if the deployment is registered and uploads the
    # appscalesensor app for registered deployments.
    sensor_deployer = SensorDeployer(zk_client)
    PeriodicCallback(sensor_deployer.deploy,
                     constants.UPLOAD_SENSOR_INTERVAL).start()

  handlers = [("/", MainHandler), task_route]
  handlers += stats_app.get_local_stats_api_routes(is_lb, is_tq)
  handlers += stats_app.get_cluster_stats_api_routes(is_master)

  server_app = tornado.web.Application(handlers, debug=False)
  server_app.listen(constants.HERMES_PORT)

  # Start loop for accepting http requests.
  IOLoop.instance().start()

  logging.info("Hermes is up and listening on port: {}."
               .format(constants.HERMES_PORT))
def main():
  """ Starts the AdminServer.

  Subcommands:
    serve   - run the AdminServer plus the local ServiceManager API.
    summary - print this machine's service states and exit.
    restart - ask the local ServiceManager (over its unix socket) to
              restart the given services, then exit.
  """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  parser = argparse.ArgumentParser(
    prog='appscale-admin', description='Manages AppScale-related processes')
  subparsers = parser.add_subparsers(dest='command')
  subparsers.required = True

  serve_parser = subparsers.add_parser(
    'serve', description='Starts the server that manages AppScale processes')
  serve_parser.add_argument(
    '-p', '--port', type=int, default=constants.DEFAULT_PORT,
    help='The port to listen on')
  serve_parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')

  subparsers.add_parser(
    'summary',
    description='Lists AppScale processes running on this machine')

  restart_parser = subparsers.add_parser(
    'restart',
    description='Restart AppScale processes running on this machine')
  restart_parser.add_argument('service', nargs='+',
                              help='The process or service ID to restart')

  args = parser.parse_args()

  if args.command == 'summary':
    table = sorted(list(get_combined_services().items()))
    print(tabulate(table, headers=['Service', 'State']))
    sys.exit(0)

  if args.command == 'restart':
    # The ServiceManager listens on a unix socket; the path is URL-escaped
    # to form a valid http+unix URL.
    socket_path = urlquote(ServiceManagerHandler.SOCKET_PATH, safe='')
    session = requests_unixsocket.Session()
    response = session.post(
      'http+unix://{}/'.format(socket_path),
      data={'command': 'restart', 'arg': [args.service]})
    response.raise_for_status()
    return

  # Remaining code handles the 'serve' subcommand.
  if args.verbose:
    logger.setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('load_balancers', appscale_info.get_load_balancer_ips())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  # Shared constructor kwargs for most handlers.
  all_resources = {
    'acc': acc,
    'ua_client': ua_client,
    'zk_client': zk_client,
    'version_update_lock': version_update_lock,
    'thread_pool': thread_pool
  }

  # Only task queue nodes need to manage push workers.
  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logger.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  service_manager = ServiceManager(zk_client)
  service_manager.start()

  app = web.Application([
    ('/oauth/token', OAuthHandler, {'ua_client': ua_client}),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions',
     VersionsHandler,
     {'ua_client': ua_client, 'zk_client': zk_client,
      'version_update_lock': version_update_lock,
      'thread_pool': thread_pool}),
    ('/v1/projects', ProjectsHandler, all_resources),
    ('/v1/projects/([a-z0-9-]+)', ProjectHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)',
     ServiceHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
     VersionHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)',
     OperationsHandler, {'ua_client': ua_client}),
    ('/api/cron/update', UpdateCronHandler,
     {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/datastore/index/add', UpdateIndexesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/queue/update', UpdateQueuesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client})
  ])
  logger.info('Starting AdminServer')
  app.listen(args.port)

  # The management API is exposed only over a local unix socket.
  management_app = web.Application([
    ('/', ServiceManagerHandler, {'service_manager': service_manager})])
  management_server = HTTPServer(management_app)
  management_socket = bind_unix_socket(ServiceManagerHandler.SOCKET_PATH)
  management_server.add_socket(management_socket)

  io_loop = IOLoop.current()
  io_loop.start()
""" This script dumps all users. """
from appscale.common import appscale_info
from appscale.common.ua_client import UAClient


if __name__ == "__main__":
  # The deployment secret authenticates this client to the UserAppServer.
  secret = appscale_info.get_secret()
  ua_client = UAClient(appscale_info.get_db_master_ip(), secret)

  # Print one user per line.
  for user in ua_client.get_all_users():
    print(user)
# Connect to ZooKeeper using every known ZooKeeper node.
zk_ips = appscale_info.get_zk_node_ips()
zk_client = KazooClient(hosts=','.join(zk_ips))
zk_client.start()

deployment_config = DeploymentConfig(zk_client)
projects_manager = GlobalProjectsManager(zk_client)
thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
source_manager = SourceManager(zk_client, thread_pool)
source_manager.configure_automatic_fetch(projects_manager)

# Global tornado options shared by the handlers.
options.define('private_ip', appscale_info.get_private_ip())
options.define('syslog_server', appscale_info.get_headnode_ip())
options.define('db_proxy', appscale_info.get_db_proxy())
options.define('tq_proxy', appscale_info.get_tq_proxy())
options.define('secret', appscale_info.get_secret())

running_instances = recover_state(zk_client)
# Clean up failed instances on a fixed interval (interval is in seconds,
# PeriodicCallback takes milliseconds).
PeriodicCallback(stop_failed_instances,
                 INSTANCE_CLEANUP_INTERVAL * 1000).start()

app = tornado.web.Application([
  ('/versions/([a-z0-9-_]+)', VersionHandler),
  ('/versions/([a-z0-9-_]+)/([0-9-]+)', InstanceHandler)
])

app.listen(constants.APP_MANAGER_PORT)
logging.info('Starting AppManager on {}'.format(
  constants.APP_MANAGER_PORT))

io_loop = IOLoop.current()
# NOTE(review): run_sync executes populate_api_servers and returns; there is
# no io_loop.start() here — confirm the loop is started elsewhere, otherwise
# the server never accepts requests.
io_loop.run_sync(populate_api_servers)
def main():
  """ Starts the AdminServer.

  Subcommands:
    serve   - run the AdminServer API.
    summary - print this machine's service states and exit.
  """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  parser = argparse.ArgumentParser(
    prog='appscale-admin', description='Manages AppScale-related processes')
  subparsers = parser.add_subparsers(dest='command')
  subparsers.required = True

  serve_parser = subparsers.add_parser(
    'serve', description='Starts the server that manages AppScale processes')
  serve_parser.add_argument(
    '-p', '--port', type=int, default=constants.DEFAULT_PORT,
    help='The port to listen on')
  serve_parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')

  subparsers.add_parser(
    'summary',
    description='Lists AppScale processes running on this machine')

  args = parser.parse_args()

  if args.command == 'summary':
    table = sorted(list(get_combined_services().items()))
    print(tabulate(table, headers=['Service', 'State']))
    sys.exit(0)

  # Remaining code handles the 'serve' subcommand.
  if args.verbose:
    logger.setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('load_balancers', appscale_info.get_load_balancer_ips())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  # Shared constructor kwargs for most handlers.
  all_resources = {
    'acc': acc,
    'ua_client': ua_client,
    'zk_client': zk_client,
    'version_update_lock': version_update_lock,
    'thread_pool': thread_pool
  }

  # Only task queue nodes need to manage push workers.
  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logger.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  service_manager = ServiceManager(zk_client)
  service_manager.start()

  app = web.Application([
    ('/oauth/token', OAuthHandler, {'ua_client': ua_client}),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions',
     VersionsHandler, all_resources),
    ('/v1/projects', ProjectsHandler, all_resources),
    ('/v1/projects/([a-z0-9-]+)', ProjectHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)',
     ServiceHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
     VersionHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)',
     OperationsHandler, {'ua_client': ua_client}),
    ('/api/cron/update', UpdateCronHandler,
     {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/queue/update', UpdateQueuesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client})
  ])
  logger.info('Starting AdminServer')
  app.listen(args.port)
  io_loop = IOLoop.current()
  io_loop.start()
# Connect to ZooKeeper using every known ZooKeeper node.
zk_ips = appscale_info.get_zk_node_ips()
zk_client = KazooClient(hosts=','.join(zk_ips))
zk_client.start()

deployment_config = DeploymentConfig(zk_client)
projects_manager = GlobalProjectsManager(zk_client)
thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
source_manager = SourceManager(zk_client, thread_pool)
source_manager.configure_automatic_fetch(projects_manager)

# Global tornado options shared by the handlers.
options.define('private_ip', appscale_info.get_private_ip())
options.define('syslog_server', appscale_info.get_headnode_ip())
options.define('db_proxy', appscale_info.get_db_proxy())
options.define('tq_proxy', appscale_info.get_tq_proxy())
options.define('secret', appscale_info.get_secret())

running_instances = recover_state(zk_client)
# Clean up failed instances on a fixed interval (interval is in seconds,
# PeriodicCallback takes milliseconds).
PeriodicCallback(stop_failed_instances,
                 INSTANCE_CLEANUP_INTERVAL * 1000).start()

app = tornado.web.Application([
  ('/versions/([a-z0-9-_]+)', VersionHandler),
  ('/versions/([a-z0-9-_]+)/([0-9-]+)', InstanceHandler)
])

app.listen(constants.APP_MANAGER_PORT)
logging.info('Starting AppManager on {}'.format(constants.APP_MANAGER_PORT))

io_loop = IOLoop.current()
# NOTE(review): run_sync executes populate_api_servers and returns; there is
# no io_loop.start() here — confirm the loop is started elsewhere, otherwise
# the server never accepts requests.
io_loop.run_sync(populate_api_servers)