Example 1
  def test_get_indices(self):
    # Stub out the Cassandra session and cluster so no real database
    # connection is made when DatastoreProxy is constructed.
    session = flexmock(default_consistency_level=None)
    cluster = flexmock(connect=lambda keyspace: session)
    flexmock(appscale_info).should_receive('get_db_ips')
    flexmock(Cluster).new_instances(cluster)
    # With no index entries returned by the range query, get_indices
    # should return an empty list.
    flexmock(DatastoreProxy).should_receive('range_query').and_return({})
    db_batch = DatastoreProxy()

    self.assertEqual(db_batch.get_indices("appid"), [])
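Run on its own, this test snippet needs a small amount of scaffolding: the flexmock import, the Cassandra driver's Cluster class, and the AppScale modules under test wrapped in a unittest.TestCase. A minimal sketch follows; the appscale.* import paths are assumptions inferred from the names used in the snippet, not taken from this page.

# Hypothetical scaffolding for the test above; the appscale.* import paths
# are assumptions and may differ in the actual AppScale source tree.
import unittest

from cassandra.cluster import Cluster
from flexmock import flexmock

from appscale.common import appscale_info
from appscale.datastore.cassandra_env.cassandra_interface import DatastoreProxy


class TestDatastoreProxy(unittest.TestCase):
  def test_get_indices(self):
    session = flexmock(default_consistency_level=None)
    cluster = flexmock(connect=lambda keyspace: session)
    flexmock(appscale_info).should_receive('get_db_ips')
    flexmock(Cluster).new_instances(cluster)
    flexmock(DatastoreProxy).should_receive('range_query').and_return({})
    db_batch = DatastoreProxy()

    self.assertEqual(db_batch.get_indices("appid"), [])


if __name__ == '__main__':
  unittest.main()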
Example 2
def main():
    """ Main function which initializes and starts the tornado server. """
    # Parse command line arguments
    parser = argparse.ArgumentParser(description='A taskqueue API server')
    parser.add_argument('--port',
                        '-p',
                        default='17447',
                        help='TaskQueue server port')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()
    if args.verbose:
        logging.getLogger('appscale').setLevel(logging.DEBUG)

    # Configure zookeeper and db access
    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()
    db_access = DatastoreProxy()

    # Initialize tornado server
    task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
    tq_application = prepare_taskqueue_application(task_queue)
    # Automatically decompress incoming requests.
    server = httpserver.HTTPServer(tq_application, decompress_request=True)
    server.listen(args.port)

    # Make sure taskqueue shuts down gracefully when signal is received
    graceful_shutdown = prepare_graceful_shutdown(zk_client, server)
    signal.signal(signal.SIGTERM, graceful_shutdown)
    signal.signal(signal.SIGINT, graceful_shutdown)

    logger.info('Starting TaskQueue server on port {}'.format(args.port))
    ioloop.IOLoop.current().start()
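prepare_taskqueue_application and prepare_graceful_shutdown are helpers defined elsewhere in the AppScale taskqueue package and are not shown here. As a rough sketch of the shutdown pattern the comments describe (stop accepting connections, close the ZooKeeper session, then stop the IOLoop), a handler factory could look like the following; it is an illustration under those assumptions, not the actual AppScale helper.

# Illustrative only: a handler factory in the spirit of
# prepare_graceful_shutdown(); the real AppScale helper may differ.
from tornado import ioloop


def make_graceful_shutdown(zk_client, server):
    """ Returns a signal handler that shuts the TaskQueue server down cleanly. """
    def handler(signum, frame):
        io_loop = ioloop.IOLoop.current()

        def shutdown():
            server.stop()      # Stop the tornado HTTPServer's listening sockets.
            zk_client.stop()   # End the kazoo ZooKeeper session.
            zk_client.close()
            io_loop.stop()     # Unblocks IOLoop.current().start() in main().

        # Signal handlers must not touch the IOLoop directly, so schedule
        # the shutdown work on the loop instead.
        io_loop.add_callback_from_signal(shutdown)

    return handler

main() would then pass the returned handler to signal.signal for SIGTERM and SIGINT, as in the example above.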
Example 3
def main():
    """ Main function which initializes and starts the tornado server. """
    parser = argparse.ArgumentParser(description='A taskqueue API server')
    parser.add_argument('--port',
                        '-p',
                        default='17447',
                        help='TaskQueue server port')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    global task_queue

    db_access = DatastoreProxy()
    task_queue = distributed_tq.DistributedTaskQueue(db_access)
    handlers = [
        # Takes JSON requests from AppController.
        (r"/startworker", StartWorkerHandler),
        (r"/stopworker", StopWorkerHandler),
        (r"/reloadworker", ReloadWorkerHandler),
        # Takes protocol buffers from the AppServers.
        (r"/*", MainHandler)
    ]

    # Provides compatibility with the v1beta2 REST API.
    handlers.extend([(RESTQueue.PATH, RESTQueue, {
        'queue_handler': task_queue
    }), (RESTTasks.PATH, RESTTasks, {
        'queue_handler': task_queue
    }), (RESTLease.PATH, RESTLease, {
        'queue_handler': task_queue
    }), (RESTTask.PATH, RESTTask, {
        'queue_handler': task_queue
    })])

    tq_application = tornado.web.Application(handlers)

    server = tornado.httpserver.HTTPServer(
        tq_application,
        decompress_request=True)  # Automatically decompress incoming requests.
    server.listen(args.port)

    while True:
        try:
            logger.info('Starting TaskQueue server on port {}'.format(
                args.port))
            tornado.ioloop.IOLoop.instance().start()
        except KeyboardInterrupt:
            logger.warning('Server interrupted by user, terminating...')
            sys.exit(1)
Example 4
def main():
    """ Main function which initializes and starts the tornado server. """
    parser = argparse.ArgumentParser(description='A taskqueue API server')
    parser.add_argument('--port',
                        '-p',
                        default='17447',
                        help='TaskQueue server port')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='Output debug-level logging')
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    global task_queue

    zk_client = KazooClient(hosts=','.join(appscale_info.get_zk_node_ips()),
                            connection_retry=ZK_PERSISTENT_RECONNECTS)
    zk_client.start()

    db_access = DatastoreProxy()
    task_queue = distributed_tq.DistributedTaskQueue(db_access, zk_client)
    handlers = [
        # Takes protocol buffers from the AppServers.
        (r"/*", MainHandler)
    ]

    # Provides compatibility with the v1beta2 REST API.
    handlers.extend([(RESTQueue.PATH, RESTQueue, {
        'queue_handler': task_queue
    }), (RESTTasks.PATH, RESTTasks, {
        'queue_handler': task_queue
    }), (RESTLease.PATH, RESTLease, {
        'queue_handler': task_queue
    }), (RESTTask.PATH, RESTTask, {
        'queue_handler': task_queue
    })])

    tq_application = tornado.web.Application(handlers)

    global server
    server = tornado.httpserver.HTTPServer(
        tq_application,
        decompress_request=True)  # Automatically decompress incoming requests.
    server.listen(args.port)

    signal.signal(signal.SIGTERM, graceful_shutdown)
    signal.signal(signal.SIGINT, graceful_shutdown)

    logger.info('Starting TaskQueue server on port {}'.format(args.port))
    IOLoop.current().start()
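The wildcard route above sends protocol-buffer requests from the AppServers to MainHandler, which this page does not show. Purely to illustrate that pattern, a Tornado handler accepting a binary request body could be sketched as follows; the class name and the acknowledgement response are made up for the example.

# Hypothetical handler showing the "protocol buffers over POST" pattern;
# not the actual AppScale MainHandler.
import tornado.web


class ProtobufEchoHandler(tornado.web.RequestHandler):
    def post(self):
        # The serialized protocol buffer arrives as the raw request body.
        raw_request = self.request.body
        # A real handler would deserialize this into the appropriate API
        # request class and dispatch on the requested method; here we only
        # acknowledge receipt.
        self.write('received {} bytes'.format(len(raw_request)))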
Example 5
        # Ensure monit is running on the ZooKeeper and database machines.
        relevant_ips = set(args.zookeeper) | set(args.database)
        for ip in relevant_ips:
            utils.ssh(ip, args.keyname, 'service monit start')

        start_zookeeper(args.zookeeper, args.keyname)
        conn = KazooClient(hosts=",".join(args.zookeeper))
        conn.start()
        if not conn.exists(ZK_CASSANDRA_CONFIG):
            conn.create(ZK_CASSANDRA_CONFIG,
                        json.dumps({"num_tokens": 256}),
                        makepath=True)
        start_cassandra(args.database, args.db_master, args.keyname,
                        args.zookeeper)
        datastore_upgrade.wait_for_quorum(args.keyname, args.db_master,
                                          len(args.database), args.replication)
        db_access = DatastoreProxy(hosts=args.database)

        # Exit early if a data layout upgrade is not needed.
        if db_access.valid_data_version():
            status = {
                'status': 'complete',
                'message': 'The data layout is valid'
            }
            sys.exit()

        zookeeper = datastore_upgrade.get_zookeeper(args.zookeeper)
        try:
            total_entities = datastore_upgrade.estimate_total_entities(
                db_access.session, args.db_master, args.keyname)
        except AppScaleDBError:
            total_entities = None
Example 6
  try:
    # Ensure monit is running.
    relevant_ips = set(args.zookeeper) | set(args.database)
    for ip in relevant_ips:
      ssh(ip, args.keyname, 'service monit start')

    start_zookeeper(args.zookeeper, args.keyname)
    conn = KazooClient(hosts=",".join(args.zookeeper))
    conn.start()
    if not conn.exists(ZK_CASSANDRA_CONFIG):
      conn.create(ZK_CASSANDRA_CONFIG, json.dumps({"num_tokens":256}),
                  makepath=True)
    start_cassandra(args.database, args.db_master, args.keyname, args.zookeeper)
    datastore_upgrade.wait_for_quorum(
      args.keyname, args.db_master, len(args.database), args.replication)
    db_access = DatastoreProxy(hosts=args.database)

    # Exit early if a data layout upgrade is not needed.
    if db_access.valid_data_version_sync():
      status = {'status': 'complete', 'message': 'The data layout is valid'}
      sys.exit()

    zookeeper = datastore_upgrade.get_zookeeper(args.zookeeper)
    try:
      total_entities = datastore_upgrade.estimate_total_entities(
        db_access.session, args.db_master, args.keyname)
    except AppScaleDBError:
      total_entities = None
    run_datastore_upgrade(db_access, zookeeper, args.log_postfix,
                          total_entities)
    status = {'status': 'complete', 'message': 'Data layout upgrade complete'}