Example #1
    def test_get_zk_node_ips(self):
        flexmock(file_io).should_receive("read").\
          and_return({"locations":["ip1", "ip2"],"last_updated_at":0})
        flexmock(json).should_receive("loads").\
          and_return({"locations":[u'ip1', u'ip2'],"last_updated_at":0})
        self.assertEquals(appscale_info.get_zk_node_ips(), [u'ip1', u'ip2'])

        flexmock(file_io).should_receive("read").and_raise(IOError)
        self.assertEquals(appscale_info.get_zk_node_ips(), [])
Example #2
  def test_get_zk_node_ips(self):
    flexmock(file_io).should_receive("read").\
      and_return({"locations":["ip1", "ip2"],"last_updated_at":0})
    flexmock(json).should_receive("loads").\
      and_return({"locations":[u'ip1', u'ip2'],"last_updated_at":0})
    self.assertEquals(appscale_info.get_zk_node_ips(), [u'ip1', u'ip2'])

    flexmock(file_io).should_receive("read").and_raise(IOError)
    self.assertEquals(appscale_info.get_zk_node_ips(), [])
Example #3
  def test_get_zk_node_ips(self):
    # File exists
    open_mock = MagicMock()
    open_mock.return_value.__enter__.return_value = StringIO('ip1\nip2')
    with patch.object(appscale_info, 'open', open_mock):
      self.assertEquals(appscale_info.get_zk_node_ips(), [u'ip1', u'ip2'])
      open_mock.assert_called_once_with('/etc/appscale/zookeeper_locations')

    # IO Error
    open_mock = MagicMock()
    open_mock.return_value.__enter__.side_effect = IOError('Boom')
    with patch.object(appscale_info, 'open', open_mock):
      self.assertEquals(appscale_info.get_zk_node_ips(), [])
      open_mock.assert_called_once_with('/etc/appscale/zookeeper_locations')
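
The two test styles above pin down the function's contract: it reads /etc/appscale/zookeeper_locations and falls back to an empty list when the file is unreadable. As a minimal sketch, an implementation satisfying the MagicMock-based test (the older flexmock tests target a JSON-based file format instead) could look like this; the real appscale_info module may differ in details:

def get_zk_node_ips():
  """ Returns the ZooKeeper node IPs listed in the locations file,
  or an empty list if the file cannot be read. """
  try:
    with open('/etc/appscale/zookeeper_locations') as locations_file:
      return locations_file.read().split()
  except IOError:
    return []
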
Example #4
def main():
    global datastore_path
    global deployment_config

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=DEFAULT_PORT,
                        help="The blobstore server's port")
    parser.add_argument('-d',
                        '--datastore-path',
                        required=True,
                        help='The location of the datastore server')
    args = parser.parse_args()

    datastore_path = args.datastore_path
    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()
    deployment_config = DeploymentConfig(zk_client)
    setup_env()

    register_location(zk_client, appscale_info.get_private_ip(), args.port)

    http_server = tornado.httpserver.HTTPServer(
        Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE, xheaders=True)

    http_server.listen(args.port)

    logger.info('Starting BlobServer on {}'.format(args.port))
    tornado.ioloop.IOLoop.instance().start()
Example #5
def main():
  """ Starts the groomer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--verbose', action='store_true',
                      help='Output debug-level logging')
  args = parser.parse_args()

  if args.verbose:
    logger.setLevel(logging.DEBUG)

  zk_hosts = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_hosts),
                          connection_retry=ZK_PERSISTENT_RECONNECTS,
                          command_retry=KazooRetry(max_tries=-1))
  zk_client.start()

  db_access = DatastoreProxy()

  thread_pool = ThreadPoolExecutor(4)

  TransactionGroomer(zk_client, db_access, thread_pool)
  logger.info('Starting transaction groomer')

  IOLoop.current().start()
Example #6
def main():
  """ Main function which initializes and starts the tornado server. """
  # Parse command line arguments
  parser = argparse.ArgumentParser(description='A taskqueue API server')
  parser.add_argument('--port', '-p', default='17447',
                      help='TaskQueue server port')
  parser.add_argument('--verbose', action='store_true',
                      help='Output debug-level logging')
  args = parser.parse_args()
  if args.verbose:
    logger.setLevel(logging.DEBUG)

  # Configure zookeeper and db access
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  db_access = DatastoreProxy()

  # Initialize tornado server
  task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
  tq_application = prepare_taskqueue_application(task_queue)
  # Automatically decompress incoming requests.
  server = httpserver.HTTPServer(tq_application, decompress_request=True)
  server.listen(args.port)

  # Make sure taskqueue shuts down gracefully when signal is received
  graceful_shutdown = prepare_graceful_shutdown(zk_client, server)
  signal.signal(signal.SIGTERM, graceful_shutdown)
  signal.signal(signal.SIGINT, graceful_shutdown)

  logger.info('Starting TaskQueue server on port {}'.format(args.port))
  ioloop.IOLoop.current().start()
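
prepare_graceful_shutdown is not shown here. Assuming it merely returns a handler that stops the HTTP server, the ZooKeeper session, and the IOLoop, a hypothetical sketch compatible with the signal.signal calls above would be:

def prepare_graceful_shutdown(zk_client, server):
  """ Returns a signal handler that shuts the TaskQueue server down cleanly. """
  def graceful_shutdown(signum, frame):
    io_loop = ioloop.IOLoop.current()
    def stop():
      server.stop()     # stop accepting new connections
      zk_client.stop()  # close the ZooKeeper session
      io_loop.stop()    # exit the loop started in main
    # Signal handlers must not touch the IOLoop directly; hand off safely.
    io_loop.add_callback_from_signal(stop)
  return graceful_shutdown
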
Example #7
def main():
    file_io.set_logging_format()
    logging.getLogger().setLevel(logging.INFO)

    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()

    deployment_config = DeploymentConfig(zk_client)
    projects_manager = GlobalProjectsManager(zk_client)
    thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
    source_manager = SourceManager(zk_client, thread_pool)
    source_manager.configure_automatic_fetch(projects_manager)
    monit_operator = MonitOperator()

    options.define('private_ip', appscale_info.get_private_ip())
    options.define('syslog_server', appscale_info.get_headnode_ip())
    options.define('db_proxy', appscale_info.get_db_proxy())
    options.define('tq_proxy', appscale_info.get_tq_proxy())
    options.define('secret', appscale_info.get_secret())

    routing_client = RoutingClient(zk_client, options.private_ip,
                                   options.secret)
    instance_manager = InstanceManager(zk_client, monit_operator,
                                       routing_client, projects_manager,
                                       deployment_config, source_manager,
                                       options.syslog_server, thread_pool,
                                       options.private_ip)
    instance_manager.start()

    logger.info('Starting AppManager')

    io_loop = IOLoop.current()
    io_loop.run_sync(instance_manager.populate_api_servers)
    io_loop.start()
Example #8
def main():
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()

  deployment_config = DeploymentConfig(zk_client)
  projects_manager = GlobalProjectsManager(zk_client)
  thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  source_manager = SourceManager(zk_client, thread_pool)
  source_manager.configure_automatic_fetch(projects_manager)
  monit_operator = MonitOperator()

  options.define('private_ip', appscale_info.get_private_ip())
  options.define('syslog_server', appscale_info.get_headnode_ip())
  options.define('db_proxy', appscale_info.get_db_proxy())
  options.define('load_balancer_ip', appscale_info.get_load_balancer_ips()[0])
  options.define('tq_proxy', appscale_info.get_tq_proxy())
  options.define('secret', appscale_info.get_secret())

  routing_client = RoutingClient(zk_client, options.private_ip, options.secret)
  instance_manager = InstanceManager(
    zk_client, monit_operator, routing_client, projects_manager,
    deployment_config, source_manager, options.syslog_server, thread_pool,
    options.private_ip)
  instance_manager.start()

  logger.info('Starting AppManager')

  io_loop = IOLoop.current()
  io_loop.run_sync(instance_manager.populate_api_servers)
  io_loop.start()
Example #9
def main():
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser(description='Backup UA Server data.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    connect_to_postgres(zk_client)

    schema_cols_num = len(USERS_SCHEMA)

    table = get_table_sync(db, table_name, USERS_SCHEMA)

    create_backup_dir(BACKUP_FILE_LOCATION)

    backup_timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
    output_file = '{0}ua_server_{1}.csv'.\
      format(BACKUP_FILE_LOCATION, backup_timestamp)

    # v1 output format
    with open(output_file, 'w') as fout:
        writer = csv.DictWriter(fout, delimiter=',', fieldnames=USERS_SCHEMA)
        writer.writeheader()
        rows = [dict(zip(USERS_SCHEMA, row)) for row in table]
        writer.writerows(rows)
Example #10
def main():
  global datastore_path
  global deployment_config

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  parser.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                      help="The blobstore server's port")
  parser.add_argument('-d', '--datastore-path', required=True,
                      help='The location of the datastore server')
  args = parser.parse_args()

  datastore_path = args.datastore_path
  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()
  deployment_config = DeploymentConfig(zk_client)
  setup_env()

  http_server = tornado.httpserver.HTTPServer(
    Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE, xheaders=True)

  http_server.listen(args.port)

  # Make sure this server is accessible from each of the load balancers.
  secret = appscale_info.get_secret()
  for load_balancer in appscale_info.get_load_balancer_ips():
    acc = AppControllerClient(load_balancer, secret)
    acc.add_routing_for_blob_server()

  logger.info('Starting BlobServer on {}'.format(args.port))
  tornado.ioloop.IOLoop.instance().start()
Example #11
def main():
    """ Starts the groomer. """
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    zk_hosts = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_hosts),
                            connection_retry=ZK_PERSISTENT_RECONNECTS,
                            command_retry=KazooRetry(max_tries=-1))
    zk_client.start()

    db_access = DatastoreProxy()

    thread_pool = ThreadPoolExecutor(4)

    TransactionGroomer(zk_client, db_access, thread_pool)
    logger.info('Starting transaction groomer')

    IOLoop.current().start()
Example #12
def main():
    """ Main function which initializes and starts the tornado server. """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='A taskqueue API server')
    parser.add_argument('--port',
                        '-p',
                        default='17447',
                        help='TaskQueue server port')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()
    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    db_access = DatastoreProxy()

    # Initialize tornado server
    task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
    tq_application = prepare_taskqueue_application(task_queue)
    # Automatically decompress incoming requests.
    server = httpserver.HTTPServer(tq_application, decompress_request=True)
    server.listen(args.port)

    # Make sure taskqueue shuts down gracefully when signal is received
    graceful_shutdown = prepare_graceful_shutdown(zk_client, server)
    signal.signal(signal.SIGTERM, graceful_shutdown)
    signal.signal(signal.SIGINT, graceful_shutdown)

    logger.info('Starting TaskQueue server on port {}'.format(args.port))
    ioloop.IOLoop.current().start()
Example #13
    def test_get_zk_node_ips(self):
        # File exists
        open_mock = MagicMock()
        open_mock.return_value.__enter__.return_value = StringIO('ip1\nip2')
        with patch.object(appscale_info, 'open', open_mock):
            self.assertEquals(appscale_info.get_zk_node_ips(),
                              [u'ip1', u'ip2'])
            open_mock.assert_called_once_with(
                '/etc/appscale/zookeeper_locations')

        # IO Error
        open_mock = MagicMock()
        open_mock.return_value.__enter__.side_effect = IOError('Boom')
        with patch.object(appscale_info, 'open', open_mock):
            self.assertEquals(appscale_info.get_zk_node_ips(), [])
            open_mock.assert_called_once_with(
                '/etc/appscale/zookeeper_locations')
Example #14
def main():
    """ Starts the AdminServer. """
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=constants.DEFAULT_PORT,
                        help='The port to listen on')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    options.define('secret', appscale_info.get_secret())
    options.define('login_ip', appscale_info.get_login_ip())
    options.define('private_ip', appscale_info.get_private_ip())

    acc = appscale_info.get_appcontroller_client()
    ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
    thread_pool = ThreadPoolExecutor(4)
    monit_operator = MonitOperator()
    all_resources = {
        'acc': acc,
        'ua_client': ua_client,
        'zk_client': zk_client,
        'version_update_lock': version_update_lock,
        'thread_pool': thread_pool
    }

    if options.private_ip in appscale_info.get_taskqueue_nodes():
        logging.info('Starting push worker manager')
        GlobalPushWorkerManager(zk_client, monit_operator)

    app = web.Application([
        ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions',
         VersionsHandler, all_resources),
        ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
         VersionHandler, all_resources),
        ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)', OperationsHandler),
        ('/api/queue/update', UpdateQueuesHandler, {
            'zk_client': zk_client
        })
    ])
    logging.info('Starting AdminServer')
    app.listen(args.port)
    io_loop = IOLoop.current()
    io_loop.start()
Example #15
def main():
    """ Main function which initializes and starts the tornado server. """
    parser = argparse.ArgumentParser(description='A taskqueue API server')
    parser.add_argument('--port',
                        '-p',
                        default='17447',
                        help='TaskQueue server port')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    global task_queue

    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()

    db_access = DatastoreProxy()
    task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
    handlers = [
        # Takes protocol buffers from the AppServers.
        (r"/*", MainHandler)
    ]

    # Provides compatibility with the v1beta2 REST API.
    handlers.extend([(RESTQueue.PATH, RESTQueue, {
        'queue_handler': task_queue
    }), (RESTTasks.PATH, RESTTasks, {
        'queue_handler': task_queue
    }), (RESTLease.PATH, RESTLease, {
        'queue_handler': task_queue
    }), (RESTTask.PATH, RESTTask, {
        'queue_handler': task_queue
    })])

    tq_application = tornado.web.Application(handlers)

    server = tornado.httpserver.HTTPServer(
        tq_application,
        decompress_request=True)  # Automatically decompress incoming requests.
    server.listen(args.port)

    while 1:
        try:
            logger.info('Starting TaskQueue server on port {}'.format(
                args.port))
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            logger.warning('Server interrupted by user, terminating...')
            sys.exit(1)
Example #16
def main():
    """ Main. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    options.define('secret', appscale_info.get_secret())

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    my_ip = appscale_info.get_private_ip()
    is_master = (my_ip == appscale_info.get_headnode_ip())
    is_lb = (my_ip in appscale_info.get_load_balancer_ips())
    is_tq = (my_ip in appscale_info.get_taskqueue_nodes())
    is_db = (my_ip in appscale_info.get_db_ips())

    if is_master:
        global zk_client
        zk_client = KazooClient(
            hosts=','.join(appscale_info.get_zk_node_ips()),
            connection_retry=ZK_PERSISTENT_RECONNECTS)
        zk_client.start()
        # Start watching profiling configs in ZooKeeper
        stats_app.ProfilingManager(zk_client)

    app = tornado.web.Application(
        stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db) +
        stats_app.get_cluster_stats_api_routes(is_master),
        debug=False)
    app.listen(constants.HERMES_PORT)
    logger.info("Hermes is up and listening on port: {}.".format(
        constants.HERMES_PORT))

    # Start loop for accepting http requests.
    IOLoop.instance().start()
Example #17
def main():
  """ Main. """
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')
  args = parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  my_ip = appscale_info.get_private_ip()
  is_master = (my_ip == appscale_info.get_headnode_ip())
  is_lb = (my_ip in appscale_info.get_load_balancer_ips())
  is_tq = (my_ip in appscale_info.get_taskqueue_nodes())
  is_db = (my_ip in appscale_info.get_db_ips())

  if is_master:
    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

  app = tornado.web.Application(
    stats_app.get_local_stats_api_routes(is_lb, is_tq, is_db)
    + stats_app.get_cluster_stats_api_routes(is_master),
    debug=False
  )
  app.listen(constants.HERMES_PORT)
  logger.info("Hermes is up and listening on port: {}."
              .format(constants.HERMES_PORT))

  # Start loop for accepting http requests.
  IOLoop.instance().start()
Example #18
def get_node_info():
    """ Creates a list of JSON objects that contain node information and are
  needed to perform a backup/restore task on the current AppScale deployment.
  """

    # TODO
    # Add logic for choosing minimal set of nodes that need to perform a task.
    # e.g. Only the node that owns the entire keyspace.

    nodes = [{
        NodeInfoTags.HOST:
        get_br_service_url(appscale_info.get_db_master_ip()),
        NodeInfoTags.ROLE:
        'db_master',
        NodeInfoTags.INDEX:
        None
    }]

    index = 0
    for node in appscale_info.get_db_slave_ips():
        host = get_br_service_url(node)
        # Make sure we don't send the same request on DB roles that reside on the
        # same node.
        if host not in nodes[0].values():
            nodes.append({
                NodeInfoTags.HOST: host,
                NodeInfoTags.ROLE: 'db_slave',
                NodeInfoTags.INDEX: index
            })
            index += 1

    index = 0
    for node in appscale_info.get_zk_node_ips():
        nodes.append({
            NodeInfoTags.HOST: get_br_service_url(node),
            NodeInfoTags.ROLE: 'zk',
            NodeInfoTags.INDEX: index
        })
        index += 1

    return nodes
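
A small usage sketch, illustrative only: grouping the service URLs by role before dispatching a backup task to each node. Nothing below depends on the concrete values of the NodeInfoTags constants:

from collections import defaultdict

def hosts_by_role(nodes):
  """ Maps each role ('db_master', 'db_slave', 'zk') to its service URLs. """
  grouped = defaultdict(list)
  for node in nodes:
    grouped[node[NodeInfoTags.ROLE]].append(node[NodeInfoTags.HOST])
  return grouped

roles = hosts_by_role(get_node_info())
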
Example #19
@gen.coroutine  # the body uses yield and gen.Return
def ensure_api_server(project_id):
    """ Make sure there is a running API server for a project.

  Args:
    project_id: A string specifying the project ID.
  Returns:
    An integer specifying the API server port.
  """
    global api_servers
    if project_id in api_servers:
        raise gen.Return(api_servers[project_id])

    server_port = MAX_API_SERVER_PORT
    for port in api_servers.values():
        if port <= server_port:
            server_port = port - 1

    zk_locations = appscale_info.get_zk_node_ips()
    start_cmd = ' '.join([
        API_SERVER_LOCATION, '--port',
        str(server_port), '--project-id', project_id, '--zookeeper-locations',
        ' '.join(zk_locations)
    ])

    watch = ''.join([API_SERVER_PREFIX, project_id])
    full_watch = '-'.join([watch, str(server_port)])
    pidfile = os.path.join(VAR_DIR, '{}.pid'.format(full_watch))
    monit_app_configuration.create_config_file(
        watch,
        start_cmd,
        pidfile,
        server_port,
        max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
        check_port=True)

    monit_operator = MonitOperator()
    yield monit_operator.reload(thread_pool)
    yield monit_operator.send_command_retry_process(full_watch, 'start')

    api_servers[project_id] = server_port
    raise gen.Return(server_port)
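
Because this version is a Tornado coroutine (it yields and delivers its result with gen.Return), callers must yield it in turn; the next example shows a synchronous variant that returns directly. A hypothetical caller:

@gen.coroutine
def start_instance(project_id):
  port = yield ensure_api_server(project_id)  # resolves to the server port
  logger.info('API server for {} on port {}'.format(project_id, port))
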
Example #20
def ensure_api_server(project_id):
    """ Make sure there is a running API server for a project.

  Args:
    project_id: A string specifying the project ID.
  Returns:
    An integer specifying the API server port.
  """
    global api_servers
    if project_id in api_servers:
        return api_servers[project_id]

    server_port = MAX_API_SERVER_PORT
    for port in api_servers.values():
        if port <= server_port:
            server_port = port - 1

    zk_locations = appscale_info.get_zk_node_ips()
    start_cmd = ' '.join([
        API_SERVER_LOCATION, '--port',
        str(server_port), '--project-id', project_id, '--zookeeper-locations',
        ' '.join(zk_locations)
    ])

    watch = ''.join([API_SERVER_PREFIX, project_id])
    full_watch = '-'.join([watch, str(server_port)])
    pidfile = os.path.join(PID_DIR, '{}.pid'.format(full_watch))
    monit_app_configuration.create_config_file(
        watch,
        start_cmd,
        pidfile,
        server_port,
        max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
        check_port=True)

    assert monit_interface.start(
        full_watch,
        is_group=False), ('Monit was unable to start {}'.format(watch))

    api_servers[project_id] = server_port
    return server_port
Example #21
def ensure_api_server(project_id):
  """ Make sure there is a running API server for a project.

  Args:
    project_id: A string specifying the project ID.
  Returns:
    An integer specifying the API server port.
  """
  global api_servers
  if project_id in api_servers:
    return api_servers[project_id]

  server_port = MAX_API_SERVER_PORT
  for port in api_servers.values():
    if port <= server_port:
      server_port = port - 1

  zk_locations = appscale_info.get_zk_node_ips()
  start_cmd = ' '.join([API_SERVER_LOCATION,
                        '--port', str(server_port),
                        '--project-id', project_id,
                        '--zookeeper-locations', ' '.join(zk_locations)])

  watch = ''.join([API_SERVER_PREFIX, project_id])
  full_watch = '-'.join([watch, str(server_port)])
  pidfile = os.path.join(PID_DIR, '{}.pid'.format(full_watch))
  monit_app_configuration.create_config_file(
    watch,
    start_cmd,
    pidfile,
    server_port,
    max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
    check_port=True)

  assert monit_interface.start(full_watch, is_group=False), (
    'Monit was unable to start {}'.format(watch))

  api_servers[project_id] = server_port
  return server_port
Example #22
def main():
    global datastore_path
    global deployment_config

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=DEFAULT_PORT,
                        help="The blobstore server's port")
    parser.add_argument('-d',
                        '--datastore-path',
                        required=True,
                        help='The location of the datastore server')
    args = parser.parse_args()

    datastore_path = args.datastore_path
    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()
    deployment_config = DeploymentConfig(zk_client)
    setup_env()

    http_server = tornado.httpserver.HTTPServer(
        Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE, xheaders=True)

    http_server.listen(args.port)

    # Make sure this server is accessible from each of the load balancers.
    secret = appscale_info.get_secret()
    for load_balancer in appscale_info.get_load_balancer_ips():
        acc = AppControllerClient(load_balancer, secret)
        acc.add_routing_for_blob_server()

    logger.info('Starting BlobServer on {}'.format(args.port))
    tornado.ioloop.IOLoop.instance().start()
Example #23
  @gen.coroutine  # the body uses yield and gen.Return
  def _ensure_api_server(self, project_id):
    """ Make sure there is a running API server for a project.

    Args:
      project_id: A string specifying the project ID.
    Returns:
      An integer specifying the API server port.
    """
    if project_id in self._api_servers:
      raise gen.Return(self._api_servers[project_id])

    server_port = MAX_API_SERVER_PORT
    for port in self._api_servers.values():
      if port <= server_port:
        server_port = port - 1

    zk_locations = appscale_info.get_zk_node_ips()
    start_cmd = ' '.join([API_SERVER_LOCATION,
                          '--port', str(server_port),
                          '--project-id', project_id,
                          '--zookeeper-locations', ' '.join(zk_locations)])

    watch = ''.join([API_SERVER_PREFIX, project_id])
    full_watch = '-'.join([watch, str(server_port)])
    pidfile = os.path.join(VAR_DIR, '{}.pid'.format(full_watch))
    monit_app_configuration.create_config_file(
      watch,
      start_cmd,
      pidfile,
      server_port,
      max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
      check_port=True)

    yield self._monit_operator.reload(self._thread_pool)
    yield self._monit_operator.send_command_retry_process(full_watch, 'start')

    self._api_servers[project_id] = server_port
    raise gen.Return(server_port)
Example #24
def get_node_info():
  """ Creates a list of JSON objects that contain node information and are
  needed to perform a backup/restore task on the current AppScale deployment.
  """

  # TODO
  # Add logic for choosing minimal set of nodes that need to perform a task.
  # e.g. Only the node that owns the entire keyspace.

  nodes = [{
    NodeInfoTags.HOST: get_br_service_url(appscale_info.get_db_master_ip()),
    NodeInfoTags.ROLE: 'db_master',
    NodeInfoTags.INDEX: None
  }]

  index = 0
  for node in appscale_info.get_db_slave_ips():
    host = get_br_service_url(node)
    # Make sure we don't send the same request on DB roles that reside on the
    # same node.
    if host not in nodes[0].values():
      nodes.append({
        NodeInfoTags.HOST: host,
        NodeInfoTags.ROLE: 'db_slave',
        NodeInfoTags.INDEX: index
      })
      index += 1

  index = 0
  for node in appscale_info.get_zk_node_ips():
    nodes.append({
      NodeInfoTags.HOST: get_br_service_url(node),
      NodeInfoTags.ROLE: 'zk',
      NodeInfoTags.INDEX: index
    })
    index += 1

  return nodes
Example #25
def main():
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser(description='Restore UA Server data.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    parser.add_argument('-i',
                        '--input',
                        help='File with UA Server backup',
                        required=True)
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    connect_to_postgres(zk_client)

    input_file = args.input

    with open(input_file, 'r') as fin:
        reader = csv.DictReader(fin, delimiter=',')
        # Iterate through all users in file
        for row in reader:
            if not row['applications']:
                row['applications'] = None
            else:
                # delete square brackets added by csv module
                apps = row['applications'][1:-1]
                # csv module adds extra quotes each time
                apps = apps.replace("'", "")
                row['applications'] = '{' + apps + '}'
            put_entity_sync(db, table_name, row['email'], USERS_SCHEMA, row)
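
The applications handling above round-trips a list through csv: the backup wrote a Python list literal such as "['guestbook', 'blog']", and the restore strips the brackets and quotes and rebuilds a Postgres array literal. A concrete trace of those three lines:

row = {'applications': "['guestbook', 'blog']"}
apps = row['applications'][1:-1]   # "'guestbook', 'blog'"
apps = apps.replace("'", "")       # "guestbook, blog"
assert '{' + apps + '}' == '{guestbook, blog}'
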
Example #26
def prime_cassandra(replication):
  """ Create Cassandra keyspace and initial tables.

  Args:
    replication: An integer specifying the replication factor for the keyspace.
  Raises:
    AppScaleBadArg if replication factor is not greater than 0.
    TypeError if replication is not an integer.
  """
  if not isinstance(replication, int):
    raise TypeError('Replication must be an integer')

  if int(replication) <= 0:
    raise dbconstants.AppScaleBadArg('Replication must be greater than zero')

  zk_client = KazooClient(hosts=appscale_info.get_zk_node_ips())
  zk_client.start()

  hosts = appscale_info.get_db_ips()

  remaining_retries = INITIAL_CONNECT_RETRIES
  while True:
    try:
      cluster = Cluster(hosts, load_balancing_policy=LB_POLICY)
      session = cluster.connect()
      break
    except cassandra.cluster.NoHostAvailable as connection_error:
      remaining_retries -= 1
      if remaining_retries < 0:
        raise connection_error
      time.sleep(3)
  session.default_consistency_level = ConsistencyLevel.QUORUM

  create_keyspace = """
    CREATE KEYSPACE IF NOT EXISTS "{keyspace}"
    WITH REPLICATION = %(replication)s
  """.format(keyspace=KEYSPACE)
  keyspace_replication = {'class': 'SimpleStrategy',
                          'replication_factor': replication}
  session.execute(create_keyspace, {'replication': keyspace_replication},
                  timeout=SCHEMA_CHANGE_TIMEOUT)
  session.set_keyspace(KEYSPACE)

  logger.info('Waiting for all hosts to be connected')
  deadline = time.time() + SCHEMA_CHANGE_TIMEOUT
  while True:
    if time.time() > deadline:
      logger.warning('Timeout when waiting for hosts to join. Continuing '
                      'with connected hosts.')
      break

    if len(session.get_pool_state()) == len(hosts):
      break

    time.sleep(1)

  for table in dbconstants.INITIAL_TABLES:
    create_table = """
      CREATE TABLE IF NOT EXISTS "{table}" (
        {key} blob,
        {column} text,
        {value} blob,
        PRIMARY KEY ({key}, {column})
      ) WITH COMPACT STORAGE
    """.format(table=table,
               key=ThriftColumn.KEY,
               column=ThriftColumn.COLUMN_NAME,
               value=ThriftColumn.VALUE)
    statement = SimpleStatement(create_table, retry_policy=NO_RETRIES)

    logger.info('Trying to create {}'.format(table))
    try:
      session.execute(statement, timeout=SCHEMA_CHANGE_TIMEOUT)
    except cassandra.OperationTimedOut:
      logger.warning(
        'Encountered an operation timeout while creating {} table. Waiting {} '
        'seconds for schema to settle.'.format(table, SCHEMA_CHANGE_TIMEOUT))
      time.sleep(SCHEMA_CHANGE_TIMEOUT)
      raise

  migrate_composite_index_metadata(cluster, session, zk_client)
  create_batch_tables(cluster, session)
  create_groups_table(session)
  create_transactions_table(session)
  create_pull_queue_tables(cluster, session)
  create_entity_ids_table(session)

  first_entity = session.execute(
    'SELECT * FROM "{}" LIMIT 1'.format(dbconstants.APP_ENTITY_TABLE))
  existing_entities = len(list(first_entity)) == 1

  define_ua_schema(session)

  metadata_insert = """
    INSERT INTO "{table}" ({key}, {column}, {value})
    VALUES (%(key)s, %(column)s, %(value)s)
  """.format(
    table=dbconstants.DATASTORE_METADATA_TABLE,
    key=ThriftColumn.KEY,
    column=ThriftColumn.COLUMN_NAME,
    value=ThriftColumn.VALUE
  )

  if existing_entities:
    current_version = current_datastore_version(session)
    if current_version == 1.0:
      # Instruct the groomer to reclean the indexes.
      parameters = {'key': bytearray(cassandra_interface.INDEX_STATE_KEY),
                    'column': cassandra_interface.INDEX_STATE_KEY,
                    'value': bytearray(str(IndexStates.DIRTY))}
      session.execute(metadata_insert, parameters)

      parameters = {'key': bytearray(cassandra_interface.VERSION_INFO_KEY),
                    'column': cassandra_interface.VERSION_INFO_KEY,
                    'value': bytearray(str(CURRENT_VERSION))}
      session.execute(metadata_insert, parameters)
  else:
    parameters = {'key': bytearray(cassandra_interface.VERSION_INFO_KEY),
                  'column': cassandra_interface.VERSION_INFO_KEY,
                  'value': bytearray(str(CURRENT_VERSION))}
    session.execute(metadata_insert, parameters)

    # Mark the newly created indexes as clean.
    parameters = {'key': bytearray(cassandra_interface.INDEX_STATE_KEY),
                  'column': cassandra_interface.INDEX_STATE_KEY,
                  'value': bytearray(str(IndexStates.CLEAN))}
    session.execute(metadata_insert, parameters)

    # Indicate that scatter property values do not need to be populated.
    parameters = {'key': bytearray(cassandra_interface.SCATTER_PROP_KEY),
                  'column': cassandra_interface.SCATTER_PROP_KEY,
                  'value': bytearray(ScatterPropStates.POPULATED)}
    session.execute(metadata_insert, parameters)

  # Indicate that the database has been successfully primed.
  parameters = {'key': bytearray(cassandra_interface.PRIMED_KEY),
                'column': cassandra_interface.PRIMED_KEY,
                'value': bytearray(str(CURRENT_VERSION))}
  session.execute(metadata_insert, parameters)
  logger.info('Cassandra is primed.')
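
Priming ends by writing the PRIMED_KEY marker into the metadata table, so a caller could verify success by reading that marker back. A hedged sketch that mirrors the metadata_insert statement above (the actual check in AppScale may differ):

def check_primed(session):
  """ Returns True if the PRIMED_KEY marker matches CURRENT_VERSION. """
  select = """
    SELECT {value} FROM "{table}"
    WHERE {key} = %(key)s AND {column} = %(column)s
  """.format(table=dbconstants.DATASTORE_METADATA_TABLE,
             key=ThriftColumn.KEY,
             column=ThriftColumn.COLUMN_NAME,
             value=ThriftColumn.VALUE)
  parameters = {'key': bytearray(cassandra_interface.PRIMED_KEY),
                'column': cassandra_interface.PRIMED_KEY}
  rows = list(session.execute(select, parameters))
  return bool(rows) and rows[0][0] == bytearray(str(CURRENT_VERSION))
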
Example #27
def prime_cassandra(replication):
    """ Create Cassandra keyspace and initial tables.

  Args:
    replication: An integer specifying the replication factor for the keyspace.
  Raises:
    AppScaleBadArg if replication factor is not greater than 0.
    TypeError if replication is not an integer.
  """
    if not isinstance(replication, int):
        raise TypeError('Replication must be an integer')

    if int(replication) <= 0:
        raise dbconstants.AppScaleBadArg(
            'Replication must be greater than zero')

    zk_client = KazooClient(hosts=appscale_info.get_zk_node_ips())
    zk_client.start()

    hosts = appscale_info.get_db_ips()

    remaining_retries = INITIAL_CONNECT_RETRIES
    while True:
        try:
            cluster = Cluster(hosts, load_balancing_policy=LB_POLICY)
            session = cluster.connect()
            break
        except cassandra.cluster.NoHostAvailable as connection_error:
            remaining_retries -= 1
            if remaining_retries < 0:
                raise connection_error
            time.sleep(3)
    session.default_consistency_level = ConsistencyLevel.QUORUM

    create_keyspace = """
    CREATE KEYSPACE IF NOT EXISTS "{keyspace}"
    WITH REPLICATION = %(replication)s
  """.format(keyspace=KEYSPACE)
    keyspace_replication = {
        'class': 'SimpleStrategy',
        'replication_factor': replication
    }
    session.execute(create_keyspace, {'replication': keyspace_replication},
                    timeout=SCHEMA_CHANGE_TIMEOUT)
    session.set_keyspace(KEYSPACE)

    logger.info('Waiting for all hosts to be connected')
    deadline = time.time() + SCHEMA_CHANGE_TIMEOUT
    while True:
        if time.time() > deadline:
            logger.warning(
                'Timeout when waiting for hosts to join. Continuing '
                'with connected hosts.')
            break

        if len(session.get_pool_state()) == len(hosts):
            break

        time.sleep(1)

    for table in dbconstants.INITIAL_TABLES:
        create_table = """
      CREATE TABLE IF NOT EXISTS "{table}" (
        {key} blob,
        {column} text,
        {value} blob,
        PRIMARY KEY ({key}, {column})
      ) WITH COMPACT STORAGE
    """.format(table=table,
               key=ThriftColumn.KEY,
               column=ThriftColumn.COLUMN_NAME,
               value=ThriftColumn.VALUE)
        statement = SimpleStatement(create_table, retry_policy=NO_RETRIES)

        logger.info('Trying to create {}'.format(table))
        try:
            session.execute(statement, timeout=SCHEMA_CHANGE_TIMEOUT)
        except cassandra.OperationTimedOut:
            logger.warning(
                'Encountered an operation timeout while creating {} table. Waiting {} '
                'seconds for schema to settle.'.format(table,
                                                       SCHEMA_CHANGE_TIMEOUT))
            time.sleep(SCHEMA_CHANGE_TIMEOUT)
            raise

    migrate_composite_index_metadata(cluster, session, zk_client)
    create_batch_tables(cluster, session)
    create_groups_table(session)
    create_transactions_table(session)
    create_entity_ids_table(session)

    first_entity = session.execute('SELECT * FROM "{}" LIMIT 1'.format(
        dbconstants.APP_ENTITY_TABLE))
    existing_entities = len(list(first_entity)) == 1

    define_ua_schema(session)

    metadata_insert = """
    INSERT INTO "{table}" ({key}, {column}, {value})
    VALUES (%(key)s, %(column)s, %(value)s)
  """.format(table=dbconstants.DATASTORE_METADATA_TABLE,
             key=ThriftColumn.KEY,
             column=ThriftColumn.COLUMN_NAME,
             value=ThriftColumn.VALUE)

    if existing_entities:
        current_version = current_datastore_version(session)
        if current_version == 1.0:
            # Instruct the groomer to reclean the indexes.
            parameters = {
                'key': bytearray(cassandra_interface.INDEX_STATE_KEY),
                'column': cassandra_interface.INDEX_STATE_KEY,
                'value': bytearray(str(IndexStates.DIRTY))
            }
            session.execute(metadata_insert, parameters)

            parameters = {
                'key': bytearray(cassandra_interface.VERSION_INFO_KEY),
                'column': cassandra_interface.VERSION_INFO_KEY,
                'value': bytearray(str(CURRENT_VERSION))
            }
            session.execute(metadata_insert, parameters)
    else:
        parameters = {
            'key': bytearray(cassandra_interface.VERSION_INFO_KEY),
            'column': cassandra_interface.VERSION_INFO_KEY,
            'value': bytearray(str(CURRENT_VERSION))
        }
        session.execute(metadata_insert, parameters)

        # Mark the newly created indexes as clean.
        parameters = {
            'key': bytearray(cassandra_interface.INDEX_STATE_KEY),
            'column': cassandra_interface.INDEX_STATE_KEY,
            'value': bytearray(str(IndexStates.CLEAN))
        }
        session.execute(metadata_insert, parameters)

        # Indicate that scatter property values do not need to be populated.
        parameters = {
            'key': bytearray(cassandra_interface.SCATTER_PROP_KEY),
            'column': cassandra_interface.SCATTER_PROP_KEY,
            'value': bytearray(ScatterPropStates.POPULATED)
        }
        session.execute(metadata_insert, parameters)

    # Indicate that the database has been successfully primed.
    parameters = {
        'key': bytearray(cassandra_interface.PRIMED_KEY),
        'column': cassandra_interface.PRIMED_KEY,
        'value': bytearray(str(CURRENT_VERSION))
    }
    session.execute(metadata_insert, parameters)
    logger.info('Cassandra is primed.')
Example #28
def main():
  """ Starts the AdminServer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser(
    prog='appscale-admin', description='Manages AppScale-related processes')
  subparsers = parser.add_subparsers(dest='command')
  subparsers.required = True

  serve_parser = subparsers.add_parser(
    'serve', description='Starts the server that manages AppScale processes')
  serve_parser.add_argument(
    '-p', '--port', type=int, default=constants.DEFAULT_PORT,
    help='The port to listen on')
  serve_parser.add_argument(
    '-v', '--verbose', action='store_true', help='Output debug-level logging')

  subparsers.add_parser(
    'summary', description='Lists AppScale processes running on this machine')
  restart_parser = subparsers.add_parser(
    'restart',
    description='Restart AppScale processes running on this machine')
  restart_parser.add_argument('service', nargs='+',
                              help='The process or service ID to restart')

  args = parser.parse_args()
  if args.command == 'summary':
    table = sorted(list(get_combined_services().items()))
    print(tabulate(table, headers=['Service', 'State']))
    sys.exit(0)

  if args.command == 'restart':
    socket_path = urlquote(ServiceManagerHandler.SOCKET_PATH, safe='')
    session = requests_unixsocket.Session()
    response = session.post(
      'http+unix://{}/'.format(socket_path),
      data={'command': 'restart', 'arg': [args.service]})
    response.raise_for_status()
    return

  if args.verbose:
    logger.setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('load_balancers', appscale_info.get_load_balancer_ips())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  all_resources = {
    'acc': acc,
    'ua_client': ua_client,
    'zk_client': zk_client,
    'version_update_lock': version_update_lock,
    'thread_pool': thread_pool
  }

  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logger.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  service_manager = ServiceManager(zk_client)
  service_manager.start()

  app = web.Application([
    ('/oauth/token', OAuthHandler, {'ua_client': ua_client}),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions', VersionsHandler,
     {'ua_client': ua_client, 'zk_client': zk_client,
      'version_update_lock': version_update_lock, 'thread_pool': thread_pool}),
    ('/v1/projects', ProjectsHandler, all_resources),
    ('/v1/projects/([a-z0-9-]+)', ProjectHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)', ServiceHandler,
     all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
     VersionHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)', OperationsHandler,
     {'ua_client': ua_client}),
    ('/api/cron/update', UpdateCronHandler,
     {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/datastore/index/add', UpdateIndexesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/queue/update', UpdateQueuesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client})
  ])
  logger.info('Starting AdminServer')
  app.listen(args.port)

  management_app = web.Application([
    ('/', ServiceManagerHandler, {'service_manager': service_manager})])
  management_server = HTTPServer(management_app)
  management_socket = bind_unix_socket(ServiceManagerHandler.SOCKET_PATH)
  management_server.add_socket(management_socket)

  io_loop = IOLoop.current()
  io_loop.start()
Example #29
  @gen.coroutine  # the body uses yield and gen.Return
  def _ensure_api_server(self, project_id, runtime):
    """ Make sure there is a running API server for a project.

    Args:
      project_id: A string specifying the project ID.
      runtime: The runtime for the project
    Returns:
      An integer specifying the API server port.
    """
    ensure_app_server_api = runtime == JAVA8
    if project_id in self._api_servers:
      api_server_ports = self._api_servers[project_id]
      if not ensure_app_server_api:
        raise gen.Return(api_server_ports[0])
      elif len(api_server_ports) > 1:
        raise gen.Return(api_server_ports[1])

    server_port = MAX_API_SERVER_PORT
    for ports in self._api_servers.values():
      for port in ports:
        if port <= server_port:
          server_port = port - 1

    full_watch = None
    if project_id not in self._api_servers:
      watch = ''.join([API_SERVER_PREFIX, project_id])
      full_watch = '-'.join([watch, str(server_port)])
      pidfile = os.path.join(VAR_DIR, '{}.pid'.format(full_watch))
      zk_locations = appscale_info.get_zk_node_ips()
      start_cmd = ' '.join([API_SERVER_LOCATION,
                          '--port', str(server_port),
                          '--project-id', project_id,
                          '--zookeeper-locations', ' '.join(zk_locations)])
      monit_app_configuration.create_config_file(
        watch,
        start_cmd,
        pidfile,
        server_port,
        max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
        check_port=True,
        check_host='127.0.0.1')
      api_server_port = server_port
    else:
      api_server_port = self._api_servers[project_id][0]

    full_watch_app = None
    if ensure_app_server_api:
      # Start a Python 2.7 runtime API server
      if api_server_port == server_port:
        server_port -= 1
      watch = ''.join([API_SERVER_PREFIX, '1_', project_id])
      full_watch_app = '-'.join([watch, str(server_port)])
      pidfile = os.path.join(VAR_DIR, '{}.pid'.format(full_watch_app))
      start_cmd = create_python_api_start_cmd(project_id,
                                              self._login_server,
                                              server_port,
                                              pidfile,
                                              api_server_port)
      monit_app_configuration.create_config_file(
        watch,
        start_cmd,
        pidfile,
        server_port,
        max_memory=DEFAULT_MAX_APPSERVER_MEMORY,
        check_port=True,
        check_host='127.0.0.1',
        group='api-server')
      self._api_servers[project_id] = [api_server_port, server_port]
    else:
      self._api_servers[project_id] = [server_port]

    yield self._monit_operator.reload(self._thread_pool)
    if full_watch:
      yield self._monit_operator.send_command_retry_process(full_watch, 'start')
    if full_watch_app:
      yield self._monit_operator.send_command_retry_process(full_watch_app, 'start')

    raise gen.Return(server_port)
Example #30
  @gen.coroutine  # the body uses yield
  def delete(version_key, port):
    """ Stops an AppServer instance on this machine. """
    try:
      yield stop_app_instance(version_key, int(port))
    except BadConfigurationException as error:
      raise HTTPError(HTTPCodes.BAD_REQUEST, error.message)


################################
# MAIN
################################
if __name__ == "__main__":
  file_io.set_logging_format()
  logging.getLogger().setLevel(logging.INFO)

  zk_ips = appscale_info.get_zk_node_ips()
  zk_client = KazooClient(hosts=','.join(zk_ips))
  zk_client.start()

  deployment_config = DeploymentConfig(zk_client)
  projects_manager = GlobalProjectsManager(zk_client)
  thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
  source_manager = SourceManager(zk_client, thread_pool)

  options.define('private_ip', appscale_info.get_private_ip())
  options.define('syslog_server', appscale_info.get_headnode_ip())
  options.define('db_proxy', appscale_info.get_db_proxy())
  options.define('tq_proxy', appscale_info.get_tq_proxy())

  app = tornado.web.Application([
    ('/versions/([a-z0-9-_]+)', VersionHandler),
Example #31
def main():
  """ Main. """
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '-v', '--verbose', action='store_true',
    help='Output debug-level logging')
  args = parser.parse_args()

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
  if args.verbose:
    logging.getLogger().setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())

  signal.signal(signal.SIGTERM, signal_handler)
  signal.signal(signal.SIGINT, signal_handler)

  my_ip = appscale_info.get_private_ip()
  is_master = (my_ip == appscale_info.get_headnode_ip())
  is_lb = (my_ip in appscale_info.get_load_balancer_ips())
  is_tq = (my_ip in appscale_info.get_taskqueue_nodes())

  if is_master:
    # Periodically check with the portal for new tasks.
    # Note: Currently, any active handlers from the tornado app will block
    # polling until they complete.
    PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

    # Only master Hermes node handles /do_task route
    task_route = ('/do_task', TaskHandler)

    global zk_client
    zk_client = KazooClient(
      hosts=','.join(appscale_info.get_zk_node_ips()),
      connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    # Start watching profiling configs in ZooKeeper
    stats_app.ProfilingManager(zk_client)

    # Periodically checks if the deployment is registered and uploads the
    # appscalesensor app for registered deployments.
    sensor_deployer = SensorDeployer(zk_client)
    PeriodicCallback(sensor_deployer.deploy,
                     constants.UPLOAD_SENSOR_INTERVAL).start()
  else:
    task_route = ('/do_task', Respond404Handler,
                  dict(reason='Hermes slaves do not manage tasks from Portal'))

  app = tornado.web.Application([
      ("/", MainHandler),
      task_route,
    ]
    + stats_app.get_local_stats_api_routes(is_lb, is_tq)
    + stats_app.get_cluster_stats_api_routes(is_master),
    debug=False
  )
  app.listen(constants.HERMES_PORT)
  logging.info("Hermes is up and listening on port: {}."
               .format(constants.HERMES_PORT))

  # Start loop for accepting http requests.
  IOLoop.instance().start()
Example #32
def main():
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser(description='Backup UA Server data.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    connect_to_postgres(zk_client)

    datastore_type = 'cassandra'

    ERROR_CODES = appscale_datastore.DatastoreFactory.error_codes()

    db = appscale_datastore.DatastoreFactory.getDatastore(datastore_type)

    # Keep trying until it gets the schema.
    backoff = 5
    retries = 3
    while retries >= 0:
        try:
            user_schema = db.get_schema_sync(USERS_TABLE)
        except AppScaleDBConnectionError:
            retries -= 1
            time.sleep(backoff)
            continue

        if user_schema[0] in ERROR_CODES:
            user_schema = user_schema[1:]
        else:
            retries -= 1
            time.sleep(backoff)
            continue
        break

    # If no response from cassandra
    if retries == -1:
        raise AppScaleDBConnectionError('No response from cassandra.')

    schema_cols_num = len(USERS_SCHEMA)

    if pg_connection_wrapper:
        table = get_table_sync(db, table_name, USERS_SCHEMA)
    else:
        table = get_table_sync(db, USERS_TABLE, user_schema)[1:]
        reshaped_table = reshape(table, schema_cols_num)

    create_backup_dir(BACKUP_FILE_LOCATION)

    backup_timestamp = time.strftime("%Y-%m-%d_%H-%M-%S")
    output_file = '{0}ua_server_{1}.csv'.\
      format(BACKUP_FILE_LOCATION, backup_timestamp)

    # v1 output format
    with open(output_file, 'w') as fout:
        writer = csv.DictWriter(fout, delimiter=',', fieldnames=USERS_SCHEMA)
        writer.writeheader()
        if pg_connection_wrapper:
            rows = [dict(zip(USERS_SCHEMA, row)) for row in table]
        else:
            prepare_for_backup(reshaped_table)
            rows = [dict(zip(USERS_SCHEMA, row)) for row in reshaped_table]
        writer.writerows(rows)
Example #33
def main():
    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

    parser = argparse.ArgumentParser(description='Restore UA Server data.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    parser.add_argument('-i',
                        '--input',
                        help='File with UA Server backup',
                        required=True)
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    connect_to_postgres(zk_client)

    datastore_type = 'cassandra'

    ERROR_CODES = appscale_datastore.DatastoreFactory.error_codes()

    db = appscale_datastore.DatastoreFactory.getDatastore(datastore_type)

    # Keep trying until it gets the schema.
    backoff = 5
    retries = 3
    while retries >= 0:
        try:
            user_schema = db.get_schema_sync(USERS_TABLE)
        except AppScaleDBConnectionError:
            retries -= 1
            time.sleep(backoff)
            continue

        if user_schema[0] in ERROR_CODES:
            user_schema = user_schema[1:]
        else:
            retries -= 1
            time.sleep(backoff)
            continue
        break

    # If no response from cassandra
    if retries == -1:
        raise AppScaleDBConnectionError('No response from cassandra.')

    input_file = args.input

    with open(input_file, 'r') as fin:
        reader = csv.DictReader(fin, delimiter=',')
        # Iterate through all users in the file.
        for row in reader:
            if pg_connection_wrapper:
                if not row['applications']:
                    row['applications'] = None
                else:
                    # Strip the square brackets added by the csv module.
                    apps = row['applications'][1:-1]
                    # The csv module also adds extra quotes on each write.
                    apps = apps.replace("'", "")
                    row['applications'] = '{' + apps + '}'
                put_entity_sync(db, table_name, row['email'], USERS_SCHEMA,
                                row)
            else:
                # Convert the date strings to Unix timestamps.
                for date_field in ('date_creation', 'date_change',
                                   'date_last_login'):
                    row[date_field] = str(time.mktime(
                        datetime.datetime.strptime(
                            row[date_field],
                            '%Y-%m-%d %H:%M:%S').timetuple()))

                # Strip brackets and quotes, then join the app list with
                # colons for the Cassandra-backed users table.
                apps = row['applications'][1:-1]
                apps = apps.replace("'", "").replace(', ', ':')
                row['applications'] = apps

                array = [row[key] for key in USERS_SCHEMA]
                put_entity_sync(db, USERS_TABLE, array[0], user_schema, array)
    @gen.coroutine
    def delete(version_key, port):
        """ Stops an AppServer instance on this machine. """
        try:
            yield stop_app_instance(version_key, int(port))
        except BadConfigurationException as error:
            raise HTTPError(HTTPCodes.BAD_REQUEST, error.message)


################################
# MAIN
################################
if __name__ == "__main__":
    file_io.set_logging_format()
    logging.getLogger().setLevel(logging.INFO)

    zk_ips = appscale_info.get_zk_node_ips()
    zk_client = KazooClient(hosts=','.join(zk_ips))
    zk_client.start()

    deployment_config = DeploymentConfig(zk_client)
    projects_manager = GlobalProjectsManager(zk_client)
    thread_pool = ThreadPoolExecutor(MAX_BACKGROUND_WORKERS)
    source_manager = SourceManager(zk_client, thread_pool)
    source_manager.configure_automatic_fetch(projects_manager)

    options.define('private_ip', appscale_info.get_private_ip())
    options.define('syslog_server', appscale_info.get_headnode_ip())
    options.define('db_proxy', appscale_info.get_db_proxy())
    options.define('tq_proxy', appscale_info.get_tq_proxy())
    options.define('secret', appscale_info.get_secret())
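
Nearly every main() in these examples repeats the same ZooKeeper bootstrap. As a standalone sketch of that shared pattern (the appscale.common import path is an assumption based on the modules used above):

from kazoo.client import KazooClient

from appscale.common import appscale_info

def connect_to_zookeeper():
    # Join the deployment's ZooKeeper node IPs into the comma-separated
    # host string kazoo expects, then start the client session.
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()))
    zk_client.start()
    return zk_client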
Beispiel #35
0
    def _ensure_api_server(self, project_id, runtime):
        """ Make sure there is a running API server for a project.

        Args:
          project_id: A string specifying the project ID.
          runtime: The runtime for the project.
        Returns:
          A tuple of the API server port and a list of API services.
        """
        ensure_app_server_api = runtime == JAVA8
        if project_id in self._api_servers:
            api_server_ports = self._api_servers[project_id]
            if not ensure_app_server_api:
                raise gen.Return((api_server_ports[0], [
                    'appscale-api-server@{}-{}'.format(
                        project_id, str(api_server_ports[0]))
                ]))
            elif len(api_server_ports) > 1:
                raise gen.Return((api_server_ports[1], [
                    'appscale-api-server@{}-{}'.format(
                        project_id, str(api_server_ports[0])),
                    'appscale-api-server@1_{}-{}'.format(
                        project_id, str(api_server_ports[1]))
                ]))

        server_port = MAX_API_SERVER_PORT
        for ports in self._api_servers.values():
            for port in ports:
                if port <= server_port:
                    server_port = port - 1

        api_services = []
        if project_id not in self._api_servers:
            watch = ''.join([API_SERVER_PREFIX, project_id])
            zk_locations = appscale_info.get_zk_node_ips()
            start_cmd = ' '.join([
                API_SERVER_LOCATION, '--port',
                str(server_port), '--project-id', project_id,
                '--zookeeper-locations', ' '.join(zk_locations)
            ])

            api_command_file_path = (
                '/run/appscale/apps/api_command_{}-{}'.format(
                    project_id, str(server_port)))
            api_command_content = 'exec {}'.format(start_cmd)
            file_io.write(api_command_file_path, api_command_content)

            api_server_port = server_port
        else:
            api_server_port = self._api_servers[project_id][0]
        api_services.append('appscale-api-server@{}-{}'.format(
            project_id, str(api_server_port)))

        if ensure_app_server_api:
            # Start a Python 2.7 runtime API server.
            if api_server_port == server_port:
                server_port -= 1
            start_cmd = create_python_api_start_cmd(project_id,
                                                    self._login_server,
                                                    server_port,
                                                    api_server_port)

            api_command_file_path = (
                '/run/appscale/apps/api_command_1_{}-{}'.format(
                    project_id, str(server_port)))
            api_command_content = 'exec {}'.format(start_cmd)
            file_io.write(api_command_file_path, api_command_content)

            api_services.append('appscale-api-server@{}-{}'.format(
                project_id, str(server_port)))

            self._api_servers[project_id] = [api_server_port, server_port]
        else:
            self._api_servers[project_id] = [server_port]

        raise gen.Return((server_port, api_services))
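
The port selection above counts down from MAX_API_SERVER_PORT, settling one below the lowest port already handed out. The same logic as a standalone sketch (max_port stands in for that constant; the value in the usage comment is made up):

def next_api_server_port(assigned_ports, max_port):
    # Start at the highest allowed port and step below any port that is
    # already at or below the current candidate.
    candidate = max_port
    for ports in assigned_ports:
        for port in ports:
            if port <= candidate:
                candidate = port - 1
    return candidate

# next_api_server_port([[19999], [19998, 19997]], 19999) -> 19996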
Beispiel #36
0
def main():
    """ Main. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    options.define('secret', appscale_info.get_secret())

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    my_ip = appscale_info.get_private_ip()
    is_master = (my_ip == appscale_info.get_headnode_ip())
    is_lb = (my_ip in appscale_info.get_load_balancer_ips())
    is_tq = (my_ip in appscale_info.get_taskqueue_nodes())

    if is_master:
        # Periodically check with the portal for new tasks.
        # Note: Currently, any active handlers from the tornado app will block
        # polling until they complete.
        PeriodicCallback(poll, constants.POLLING_INTERVAL).start()

        # Only the master Hermes node handles the /do_task route.
        task_route = ('/do_task', TaskHandler)

        global zk_client
        zk_client = KazooClient(
            hosts=','.join(appscale_info.get_zk_node_ips()),
            connection_retry=ZK_PERSISTENT_RECONNECTS)
        zk_client.start()
        # Start watching profiling configs in ZooKeeper
        stats_app.ProfilingManager(zk_client)

        # Periodically checks if the deployment is registered and uploads the
        # appscalesensor app for registered deployments.
        sensor_deployer = SensorDeployer(zk_client)
        PeriodicCallback(sensor_deployer.deploy,
                         constants.UPLOAD_SENSOR_INTERVAL).start()
    else:
        task_route = (
            '/do_task', Respond404Handler,
            dict(reason='Hermes slaves do not manage tasks from Portal'))

    app = tornado.web.Application(
        [
            ("/", MainHandler),
            task_route,
        ] + stats_app.get_local_stats_api_routes(is_lb, is_tq) +
        stats_app.get_cluster_stats_api_routes(is_master),
        debug=False)
    app.listen(constants.HERMES_PORT)

    logging.info("Hermes is up and listening on port: {}.".format(
        constants.HERMES_PORT))

    # Start the loop for accepting HTTP requests; this call blocks.
    IOLoop.instance().start()
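
Respond404Handler above is project code that rejects requests on non-master nodes with the given reason. A plausible minimal implementation, assuming only standard Tornado APIs:

import tornado.web

class Respond404Handler(tornado.web.RequestHandler):
    def initialize(self, reason):
        # Tornado passes the dict from the route tuple as keyword
        # arguments to initialize().
        self.reason = reason

    def get(self):
        self.set_status(404, self.reason)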
Beispiel #37
0
def main():
  """ Starts the AdminServer. """
  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser(
    prog='appscale-admin', description='Manages AppScale-related processes')
  subparsers = parser.add_subparsers(dest='command')
  subparsers.required = True

  serve_parser = subparsers.add_parser(
    'serve', description='Starts the server that manages AppScale processes')
  serve_parser.add_argument(
    '-p', '--port', type=int, default=constants.DEFAULT_PORT,
    help='The port to listen on')
  serve_parser.add_argument(
    '-v', '--verbose', action='store_true', help='Output debug-level logging')

  subparsers.add_parser(
    'summary', description='Lists AppScale processes running on this machine')

  args = parser.parse_args()
  if args.command == 'summary':
    table = sorted(list(get_combined_services().items()))
    print(tabulate(table, headers=['Service', 'State']))
    sys.exit(0)

  if args.verbose:
    logger.setLevel(logging.DEBUG)

  options.define('secret', appscale_info.get_secret())
  options.define('login_ip', appscale_info.get_login_ip())
  options.define('private_ip', appscale_info.get_private_ip())
  options.define('load_balancers', appscale_info.get_load_balancer_ips())

  acc = appscale_info.get_appcontroller_client()
  ua_client = UAClient(appscale_info.get_db_master_ip(), options.secret)
  zk_client = KazooClient(
    hosts=','.join(appscale_info.get_zk_node_ips()),
    connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()
  version_update_lock = zk_client.Lock(constants.VERSION_UPDATE_LOCK_NODE)
  thread_pool = ThreadPoolExecutor(4)
  monit_operator = MonitOperator()
  all_resources = {
    'acc': acc,
    'ua_client': ua_client,
    'zk_client': zk_client,
    'version_update_lock': version_update_lock,
    'thread_pool': thread_pool
  }

  if options.private_ip in appscale_info.get_taskqueue_nodes():
    logger.info('Starting push worker manager')
    GlobalPushWorkerManager(zk_client, monit_operator)

  service_manager = ServiceManager(zk_client)
  service_manager.start()

  app = web.Application([
    ('/oauth/token', OAuthHandler, {'ua_client': ua_client}),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions', VersionsHandler,
     all_resources),
    ('/v1/projects', ProjectsHandler, all_resources),
    ('/v1/projects/([a-z0-9-]+)', ProjectHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)', ServiceHandler,
     all_resources),
    ('/v1/apps/([a-z0-9-]+)/services/([a-z0-9-]+)/versions/([a-z0-9-]+)',
     VersionHandler, all_resources),
    ('/v1/apps/([a-z0-9-]+)/operations/([a-z0-9-]+)', OperationsHandler,
     {'ua_client': ua_client}),
    ('/api/cron/update', UpdateCronHandler,
     {'acc': acc, 'zk_client': zk_client, 'ua_client': ua_client}),
    ('/api/queue/update', UpdateQueuesHandler,
     {'zk_client': zk_client, 'ua_client': ua_client})
  ])
  logger.info('Starting AdminServer')
  app.listen(args.port)
  io_loop = IOLoop.current()
  io_loop.start()
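
The route tuples above share resources with handlers by passing a dict as the third element; Tornado forwards it to each handler's initialize(). A minimal sketch of the receiving side (the handler name and fields are illustrative, not the project's actual handlers):

from tornado import web

class ResourceAwareHandler(web.RequestHandler):
    def initialize(self, zk_client=None, thread_pool=None, **kwargs):
        # Each keyword argument comes from the dict in the matching
        # route tuple, e.g. {'zk_client': zk_client, ...}.
        self.zk_client = zk_client
        self.thread_pool = thread_pool

    def get(self):
        self.write('ok')

app = web.Application([
    ('/example', ResourceAwareHandler,
     {'zk_client': None, 'thread_pool': None}),
])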