def main():
  """ Starts the BlobServer. """
  global datastore_path
  global deployment_config

  logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)

  parser = argparse.ArgumentParser()
  # A default and required=True are contradictory; since a default port is
  # given, the flag is optional.
  parser.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                      help="The blobstore server's port")
  parser.add_argument('-d', '--datastore-path', required=True,
                      help='The location of the datastore server')
  args = parser.parse_args()

  datastore_path = args.datastore_path
  deployment_config = DeploymentConfig(appscale_info.get_zk_locations_string())
  setup_env()

  http_server = tornado.httpserver.HTTPServer(
    Application(), max_buffer_size=MAX_REQUEST_BUFF_SIZE)
  http_server.listen(args.port)

  # Tell the AppController to start routing traffic to this server.
  acc = appscale_info.get_appcontroller_client()
  acc.add_routing_for_blob_server()

  logging.info('Starting BlobServer on {}'.format(args.port))
  tornado.ioloop.IOLoop.instance().start()
def main(): """ Starts a web service for handing datastore requests. """ global datastore_access global server_node global zookeeper zookeeper_locations = appscale_info.get_zk_locations_string() parser = argparse.ArgumentParser() parser.add_argument('-t', '--type', choices=dbconstants.VALID_DATASTORES, default=dbconstants.VALID_DATASTORES[0], help='Database type') parser.add_argument('-p', '--port', type=int, default=dbconstants.DEFAULT_PORT, help='Datastore server port') parser.add_argument('-v', '--verbose', action='store_true', help='Output debug-level logging') args = parser.parse_args() if args.verbose: logger.setLevel(logging.DEBUG) options.define('private_ip', appscale_info.get_private_ip()) options.define('port', args.port) server_node = '{}/{}:{}'.format(DATASTORE_SERVERS_NODE, options.private_ip, options.port) datastore_batch = DatastoreFactory.getDatastore( args.type, log_level=logger.getEffectiveLevel()) zookeeper = zktransaction.ZKTransaction( host=zookeeper_locations, db_access=datastore_batch, log_level=logger.getEffectiveLevel()) zookeeper.handle.add_listener(zk_state_listener) zookeeper.handle.ensure_path(DATASTORE_SERVERS_NODE) # Since the client was started before adding the listener, make sure the # server node gets created. zk_state_listener(zookeeper.handle.state) zookeeper.handle.ChildrenWatch(DATASTORE_SERVERS_NODE, update_servers_watch) transaction_manager = TransactionManager(zookeeper.handle) datastore_access = DatastoreDistributed( datastore_batch, transaction_manager, zookeeper=zookeeper, log_level=logger.getEffectiveLevel()) server = tornado.httpserver.HTTPServer(pb_application) server.listen(args.port) IOLoop.current().start()
def main(): """ Starts a web service for handing datastore requests. """ global datastore_access zookeeper_locations = appscale_info.get_zk_locations_string() parser = argparse.ArgumentParser() parser.add_argument('-t', '--type', choices=dbconstants.VALID_DATASTORES, default=dbconstants.VALID_DATASTORES[0], help='Database type') parser.add_argument('-p', '--port', type=int, default=dbconstants.DEFAULT_PORT, help='Datastore server port') parser.add_argument('-v', '--verbose', action='store_true', help='Output debug-level logging') args = parser.parse_args() if args.verbose: logger.setLevel(logging.DEBUG) datastore_batch = DatastoreFactory.getDatastore( args.type, log_level=logger.getEffectiveLevel()) zookeeper = zktransaction.ZKTransaction( host=zookeeper_locations, start_gc=True, db_access=datastore_batch, log_level=logger.getEffectiveLevel()) datastore_access = DatastoreDistributed( datastore_batch, zookeeper=zookeeper, log_level=logger.getEffectiveLevel()) server = tornado.httpserver.HTTPServer(pb_application) server.listen(args.port) tornado.ioloop.IOLoop.current().start()
def main(): """ Updates a composite index after prompting the user. """ parser = argparse.ArgumentParser(description='Updates composite indexes') parser.add_argument('--type', '-t', default='cassandra', help='The datastore backend type') parser.add_argument('--app_id', '-a', required=True, help='The project ID') parser.add_argument('--all', action='store_true', help='Updates all composite indexes') args = parser.parse_args() datastore_batch = appscale_datastore_batch.DatastoreFactory.\ getDatastore(args.type) zookeeper_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zookeeper_locations) transaction_manager = TransactionManager(zookeeper.handle) datastore_access = DatastoreDistributed(datastore_batch, transaction_manager, zookeeper=zookeeper) index_manager = IndexManager(zookeeper.handle, datastore_access) datastore_access.index_manager = index_manager indices = index_manager.projects[args.app_id].indexes_pb if len(indices) == 0: print('No composite indices found for app {}'.format(args.app_id)) zookeeper.close() return update_composite_index_sync = tornado_synchronous( datastore_access.update_composite_index) if args.all: for index in indices: update_composite_index_sync(args.app_id, index) print('Successfully updated all composite indexes') return selection = -1 selection_range = range(1, len(indices) + 1) while selection not in selection_range: for number, index in enumerate(indices, start=1): pretty_index = prettify_index(index.definition()) print('{}) {}'.format(number, pretty_index)) try: selection = int( raw_input( 'Select the index you want to update. (1-{}) '.format( len(indices)))) except KeyboardInterrupt: zookeeper.close() sys.exit() selected_index = indices[selection - 1] update_composite_index_sync(args.app_id, selected_index) zookeeper.close() print('Index successfully updated')
def register_location(host, port):
  """ Registers the service's location with ZooKeeper.

  Args:
    host: A string specifying the service's host.
    port: An integer specifying the service's port.
  """
  global zk_client
  zk_client = KazooClient(hosts=appscale_info.get_zk_locations_string(),
                          connection_retry=ZK_PERSISTENT_RECONNECTS)
  zk_client.start()

  server_node = '{}/{}:{}'.format(UA_SERVERS_NODE, host, port)

  def create_server_node():
    """ Creates a server registration entry in ZooKeeper. """
    try:
      zk_client.retry(zk_client.create, server_node, ephemeral=True)
    except NodeExistsError:
      # If the server gets restarted, the old node may exist for a short time.
      zk_client.retry(zk_client.delete, server_node)
      zk_client.retry(zk_client.create, server_node, ephemeral=True)
    logger.info('UAServer registered at {}'.format(server_node))

  def zk_state_listener(state):
    """ Handles changes to the ZooKeeper connection state.

    Args:
      state: A string specifying the new ZooKeeper connection state.
    """
    if state == KazooState.CONNECTED:
      # Kazoo invokes listeners from its connection thread, so do the
      # blocking ZooKeeper calls in a separate thread.
      threading.Thread(target=create_server_node).start()

  zk_client.add_listener(zk_state_listener)
  zk_client.ensure_path(UA_SERVERS_NODE)
  # Since the client was started before adding the listener, make sure the
  # server node gets created.
  zk_state_listener(zk_client.state)
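# A minimal usage sketch for the helper above (hypothetical port; assumes
# this module's imports and globals are in place). The registration node is
# ephemeral, so the process must stay alive for it to persist:
#
#   register_location(appscale_info.get_private_ip(), 4342)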
def main(): """ This main function allows you to run the backup manually. """ parser = init_parser() args = parser.parse_args() # Set up logging. level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \ '%(lineno)s %(message)s ', level=level) logging.info("Logging started") message = "Backing up " if args.source_code: message += "source and " message += "data for: {0}".format(args.app_id) logging.info(message) zk_connection_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zk_connection_locations) db_info = appscale_info.get_db_info() table = db_info[':table'] skip_list = args.skip if not skip_list: skip_list = [] logging.info("Will skip the following kinds: {0}".format(sorted(skip_list))) ds_backup = DatastoreBackup(args.app_id, zookeeper, table, source_code=args.source_code, skip_list=sorted(skip_list)) try: ds_backup.run() finally: zookeeper.close()
def main(): """ This main function allows you to run the restore manually. """ # Parse CLI arguments. parser = init_parser() args = parser.parse_args() # Set up logging. level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \ '%(lineno)s %(message)s ', level=level) logger.info("Logging started") logger.info(args) zk_connection_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zk_connection_locations) # Verify app is deployed. if not app_is_deployed(args.app_id, zookeeper.handle): return # Verify backup dir exists. if not backup_dir_exists(args.backup_dir): return if args.clear_datastore: message = "Deleting \"{0}\" data...".\ format(args.app_id, args.backup_dir) logger.info(message) try: tables_to_clear = { APP_ENTITY_TABLE: APP_ENTITY_SCHEMA, ASC_PROPERTY_TABLE: PROPERTY_SCHEMA, DSC_PROPERTY_TABLE: PROPERTY_SCHEMA, COMPOSITE_TABLE: COMPOSITE_SCHEMA, APP_KIND_TABLE: APP_KIND_SCHEMA } for table, schema in tables_to_clear.items(): fetch_and_delete_entities('cassandra', table, schema, args.app_id, False) except Exception as exception: logger.error("Unhandled exception while deleting \"{0}\" data: {1} " \ "Exiting...".format(args.app_id, exception.message)) return # Initialize connection to Zookeeper and database related variables. db_info = appscale_info.get_db_info() table = db_info[':table'] # Start restore process. ds_restore = DatastoreRestore(args.app_id.strip('/'), args.backup_dir, zookeeper, table) try: ds_restore.run() finally: zookeeper.close()
def main(): """ This main function allows you to run the restore manually. """ # Parse CLI arguments. parser = init_parser() args = parser.parse_args() # Set up logging. level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \ '%(lineno)s %(message)s ', level=level) logger.info("Logging started") logger.info(args) zk_connection_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zk_connection_locations) # Verify app is deployed. if not app_is_deployed(args.app_id, zookeeper.handle): return # Verify backup dir exists. if not backup_dir_exists(args.backup_dir): return if args.clear_datastore: message = "Deleting \"{0}\" data...".\ format(args.app_id, args.backup_dir) logger.info(message) try: tables_to_clear = { APP_ENTITY_TABLE: APP_ENTITY_SCHEMA, ASC_PROPERTY_TABLE: PROPERTY_SCHEMA, DSC_PROPERTY_TABLE: PROPERTY_SCHEMA, COMPOSITE_TABLE: COMPOSITE_SCHEMA, APP_KIND_TABLE: APP_KIND_SCHEMA } for table, schema in tables_to_clear.items(): fetch_and_delete_entities('cassandra', table, schema, args.app_id, False) except Exception as exception: logger.error("Unhandled exception while deleting \"{0}\" data: {1} " \ "Exiting...".format(args.app_id, exception.message)) return # Initialize connection to Zookeeper and database related variables. db_info = appscale_info.get_db_info() table = db_info[':table'] # Start restore process. ds_restore = DatastoreRestore(args.app_id.strip('/'), args.backup_dir, zookeeper, table) try: ds_restore.run() finally: zookeeper.close()
def main(): """ Starts a web service for handing datastore requests. """ global datastore_access global server_node global zookeeper zookeeper_locations = appscale_info.get_zk_locations_string() parser = argparse.ArgumentParser() parser.add_argument('-t', '--type', choices=dbconstants.VALID_DATASTORES, default=dbconstants.VALID_DATASTORES[0], help='Database type') parser.add_argument('-p', '--port', type=int, default=dbconstants.DEFAULT_PORT, help='Datastore server port') parser.add_argument('-v', '--verbose', action='store_true', help='Output debug-level logging') args = parser.parse_args() if args.verbose: logging.getLogger('appscale').setLevel(logging.DEBUG) options.define('private_ip', appscale_info.get_private_ip()) options.define('port', args.port) taskqueue_locations = get_load_balancer_ips() server_node = '{}/{}:{}'.format(DATASTORE_SERVERS_NODE, options.private_ip, options.port) datastore_batch = DatastoreFactory.getDatastore( args.type, log_level=logger.getEffectiveLevel()) zookeeper = zktransaction.ZKTransaction( host=zookeeper_locations, db_access=datastore_batch, log_level=logger.getEffectiveLevel()) zookeeper.handle.add_listener(zk_state_listener) zookeeper.handle.ensure_path(DATASTORE_SERVERS_NODE) # Since the client was started before adding the listener, make sure the # server node gets created. zk_state_listener(zookeeper.handle.state) zookeeper.handle.ChildrenWatch(DATASTORE_SERVERS_NODE, update_servers_watch) transaction_manager = TransactionManager(zookeeper.handle) datastore_access = DatastoreDistributed( datastore_batch, transaction_manager, zookeeper=zookeeper, log_level=logger.getEffectiveLevel(), taskqueue_locations=taskqueue_locations) index_manager = IndexManager(zookeeper.handle, datastore_access, perform_admin=True) datastore_access.index_manager = index_manager server = tornado.httpserver.HTTPServer(pb_application) server.listen(args.port) IOLoop.current().start()
def main(): """ Updates a composite index after prompting the user. """ try: opts, remainder = getopt.getopt( sys.argv[1:], 't:a:', ['type=', 'app_id=']) except getopt.GetoptError: usage() sys.exit(1) db_type = None app_id = None for opt, arg in opts: if opt in ('-t', '--type'): db_type = arg elif opt in ('-a', '--app_id'): app_id = arg if not db_type or not app_id: usage() sys.exit(1) datastore_batch = appscale_datastore_batch.DatastoreFactory.\ getDatastore(db_type) zookeeper_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zookeeper_locations) transaction_manager = TransactionManager(zookeeper.handle) datastore_access = DatastoreDistributed( datastore_batch, transaction_manager, zookeeper=zookeeper) pb_indices = datastore_access.datastore_batch.get_indices(app_id) indices = [datastore_pb.CompositeIndex(index) for index in pb_indices] if len(indices) == 0: print('No composite indices found for app {}'.format(app_id)) zookeeper.close() sys.exit(1) selection = -1 selection_range = range(1, len(indices) + 1) while selection not in selection_range: for number, index in enumerate(indices, start=1): pretty_index = prettify_index(index.definition()) print('{}) {}'.format(number, pretty_index)) try: selection = int(raw_input('Select the index you want to update. (1-{}) ' .format(len(indices)))) except KeyboardInterrupt: zookeeper.close() sys.exit() selected_index = indices[selection - 1] datastore_access.update_composite_index(app_id, selected_index) zookeeper.close()
def main(): """ Updates a composite index after prompting the user. """ parser = argparse.ArgumentParser(description='Updates composite indexes') parser.add_argument('--type', '-t', default='cassandra', help='The datastore backend type') parser.add_argument('--app_id', '-a', required=True, help='The project ID') parser.add_argument('--all', action='store_true', help='Updates all composite indexes') args = parser.parse_args() datastore_batch = appscale_datastore_batch.DatastoreFactory.\ getDatastore(args.type) zookeeper_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zookeeper_locations) transaction_manager = TransactionManager(zookeeper.handle) datastore_access = DatastoreDistributed( datastore_batch, transaction_manager, zookeeper=zookeeper) index_manager = IndexManager(zookeeper.handle, datastore_access) datastore_access.index_manager = index_manager indices = index_manager.projects[args.app_id].indexes_pb if len(indices) == 0: print('No composite indices found for app {}'.format(args.app_id)) zookeeper.close() return update_composite_index_sync = tornado_synchronous( datastore_access.update_composite_index) if args.all: for index in indices: update_composite_index_sync(args.app_id, index) print('Successfully updated all composite indexes') return selection = -1 selection_range = range(1, len(indices) + 1) while selection not in selection_range: for number, index in enumerate(indices, start=1): pretty_index = prettify_index(index.definition()) print('{}) {}'.format(number, pretty_index)) try: selection = int(raw_input('Select the index you want to update. (1-{}) ' .format(len(indices)))) except KeyboardInterrupt: zookeeper.close() sys.exit() selected_index = indices[selection - 1] update_composite_index_sync(args.app_id, selected_index) zookeeper.close() print('Index successfully updated')
def main():
  """ Runs the datastore groomer. """
  logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
  logger = logging.getLogger(__name__)

  zookeeper_locations = appscale_info.get_zk_locations_string()
  gc_zookeeper = zk.ZKTransaction(host=zookeeper_locations, start_gc=False)
  logger.info("Using ZK locations {0}".format(zookeeper_locations))

  ds_groomer = groomer.DatastoreGroomer(gc_zookeeper, "cassandra",
                                        LOCAL_DATASTORE)
  try:
    ds_groomer.start()
  except Exception as exception:
    logger.warning("An exception slipped through:")
    logger.exception(exception)
    logger.warning("Exiting service.")
def main():
  """ Runs the datastore groomer. """
  logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
  logger = logging.getLogger(__name__)

  zookeeper_locations = appscale_info.get_zk_locations_string()
  gc_zookeeper = zk.ZKTransaction(host=zookeeper_locations)
  logger.info("Using ZK locations {0}".format(zookeeper_locations))

  datastore_location = ':'.join(
    [appscale_info.get_db_proxy(), str(DB_SERVER_PORT)])
  ds_groomer = groomer.DatastoreGroomer(gc_zookeeper, "cassandra",
                                        datastore_location)
  try:
    ds_groomer.start()
  except Exception as exception:
    logger.warning("An exception slipped through:")
    logger.exception(exception)
    logger.warning("Exiting service.")
def main(): """ This main function allows you to run the backup manually. """ parser = init_parser() args = parser.parse_args() # Set up logging. level = logging.INFO if args.debug: level = logging.DEBUG logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s:' \ '%(lineno)s %(message)s ', level=level) logging.info("Logging started") message = "Backing up " if args.source_code: message += "source and " message += "data for: {0}".format(args.app_id) logging.info(message) zk_connection_locations = appscale_info.get_zk_locations_string() zookeeper = zk.ZKTransaction(host=zk_connection_locations) db_info = appscale_info.get_db_info() table = db_info[':table'] skip_list = args.skip if not skip_list: skip_list = [] logging.info("Will skip the following kinds: {0}".format( sorted(skip_list))) ds_backup = DatastoreBackup(args.app_id, zookeeper, table, source_code=args.source_code, skip_list=sorted(skip_list)) try: ds_backup.run() finally: zookeeper.close()
def main():
  """ Runs the datastore groomer. """
  logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
  logger = logging.getLogger(__name__)

  zookeeper_locations = appscale_info.get_zk_locations_string()

  retry_policy = KazooRetry(max_tries=5)
  zk_client = KazooClient(zookeeper_locations,
                          connection_retry=ZK_PERSISTENT_RECONNECTS,
                          command_retry=retry_policy)
  zk_client.start()
  gc_zookeeper = zk.ZKTransaction(zk_client)
  logger.info("Using ZK locations {0}".format(zookeeper_locations))

  datastore_location = ':'.join(
    [appscale_info.get_db_proxy(), str(DB_SERVER_PORT)])
  ds_groomer = groomer.DatastoreGroomer(gc_zookeeper, "cassandra",
                                        datastore_location)
  try:
    ds_groomer.start()
  except Exception as exception:
    logger.warning("An exception slipped through:")
    logger.exception(exception)
    logger.warning("Exiting service.")
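# ZK_PERSISTENT_RECONNECTS is imported from the project's constants. It is
# presumably a KazooRetry policy that never stops retrying the connection,
# along the lines of the following sketch (an assumption, not the verified
# definition):
#
#   from kazoo.retry import KazooRetry
#   ZK_PERSISTENT_RECONNECTS = KazooRetry(max_delay=30, max_tries=-1)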
  for field in REQUIRED_CONFIG_FIELDS:
    if field not in config:
      logging.error("Unable to find {0} in configuration".format(field))
      return False
  return True


################################
# MAIN
################################
if __name__ == "__main__":
  file_io.set_logging_format()

  deployment_config = DeploymentConfig(
    appscale_info.get_zk_locations_string())

  INTERNAL_IP = appscale_info.get_private_ip()
  SERVER = SOAPpy.SOAPServer((INTERNAL_IP, constants.APP_MANAGER_PORT))

  SERVER.registerFunction(start_app)
  SERVER.registerFunction(stop_app)
  SERVER.registerFunction(stop_app_instance)

  while True:
    try:
      SERVER.serve_forever()
    except SSL.SSLError:
      pass
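# A hedged sketch of how a client might invoke this SOAP service (assumes
# SOAPpy is installed; the method arguments are illustrative only, since the
# registered functions' signatures are defined elsewhere in this module):
#
#   import SOAPpy
#   proxy = SOAPpy.SOAPProxy(
#     'http://{}:{}'.format(INTERNAL_IP, constants.APP_MANAGER_PORT))
#   proxy.stop_app('guestbook')  # hypothetical project ID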
def main(): """ Starts a web service for handing datastore requests. """ global datastore_access global server_node global zk_client zookeeper_locations = appscale_info.get_zk_locations_string() if not zookeeper_locations: zookeeper_locations = 'localhost:2181' parser = argparse.ArgumentParser() parser.add_argument('-t', '--type', choices=dbconstants.VALID_DATASTORES, default=dbconstants.VALID_DATASTORES[0], help='Database type') parser.add_argument('--fdb-clusterfile', default=None, help='Location of FoundationDB clusterfile') parser.add_argument('-p', '--port', type=int, default=dbconstants.DEFAULT_PORT, help='Datastore server port') parser.add_argument('-v', '--verbose', action='store_true', help='Output debug-level logging') args = parser.parse_args() if args.verbose: logging.getLogger('appscale').setLevel(logging.DEBUG) options.define('private_ip', appscale_info.get_private_ip()) options.define('port', args.port) taskqueue_locations = get_load_balancer_ips() server_node = '{}/{}:{}'.format(DATASTORE_SERVERS_NODE, options.private_ip, options.port) retry_policy = KazooRetry(max_tries=5) zk_client = kazoo.client.KazooClient( hosts=zookeeper_locations, connection_retry=ZK_PERSISTENT_RECONNECTS, command_retry=retry_policy) zk_client.start() if args.type == 'cassandra': datastore_batch = DatastoreFactory.getDatastore( args.type, log_level=logger.getEffectiveLevel()) zookeeper = zktransaction.ZKTransaction( zk_client=zk_client, db_access=datastore_batch, log_level=logger.getEffectiveLevel()) transaction_manager = TransactionManager(zk_client) datastore_access = DatastoreDistributed( datastore_batch, transaction_manager, zookeeper=zookeeper, log_level=logger.getEffectiveLevel(), taskqueue_locations=taskqueue_locations) else: from appscale.datastore.fdb.fdb_datastore import FDBDatastore clusterfile_path = args.fdb_clusterfile if not clusterfile_path: try: clusterfile_content = zk_client.get(FDB_CLUSTERFILE_NODE)[0] clusterfile_path = '/run/appscale/appscale-datastore-fdb.cluster' with open(clusterfile_path, 'w') as clusterfile: clusterfile.write(clusterfile_content) except NoNodeError: logger.warning( 'Neither --fdb-clusterfile was specified nor {} ZK node exists,' 'FDB client will try to find clusterfile in one of default locations' .format(FDB_CLUSTERFILE_NODE)) datastore_access = FDBDatastore() datastore_access.start(clusterfile_path) zk_client.add_listener(zk_state_listener) zk_client.ensure_path(DATASTORE_SERVERS_NODE) # Since the client was started before adding the listener, make sure the # server node gets created. zk_state_listener(zk_client.state) zk_client.ChildrenWatch(DATASTORE_SERVERS_NODE, update_servers_watch) if args.type == 'cassandra': index_manager = IndexManager(zk_client, datastore_access, perform_admin=True) datastore_access.index_manager = index_manager server = tornado.httpserver.HTTPServer(pb_application) server.listen(args.port) IOLoop.current().start()