def _prepare_and_submit_blueprint(self, file_server_root, application_dir,
                                  blueprint_id):
    """Parse and publish an uploaded blueprint, then relocate its directory.

    :param file_server_root: root directory of the manager's file server
    :param application_dir: directory (relative to the root) holding the
        extracted blueprint application
    :param blueprint_id: id under which the blueprint is published
    :return: the published blueprint model
    :raises manager_exceptions.InvalidBlueprintError: when DSL parsing fails
    """
    application_file = self._extract_application_file(file_server_root,
                                                      application_dir)
    file_server_base_url = config.instance().file_server_base_uri
    dsl_path = '{0}/{1}'.format(file_server_base_url, application_file)
    alias_mapping = '{0}/{1}'.format(file_server_base_url,
                                     'cloudify/alias-mappings.yaml')
    resources_base = file_server_base_url + '/'
    # add to blueprints manager (will also dsl_parse it)
    try:
        blueprint = get_blueprints_manager().publish_blueprint(
            dsl_path, alias_mapping, resources_base, blueprint_id)
        # moving the app directory in the file server to be under a
        # directory named after the blueprint id
        shutil.move(os.path.join(file_server_root, application_dir),
                    os.path.join(
                        file_server_root,
                        config.instance().file_server_blueprints_folder,
                        blueprint.id))
        self._process_plugins(file_server_root, blueprint.id)
        return blueprint
    # 'except X as ex' replaces the legacy 'except X, ex' form; it works
    # on python 2.6+ and python 3 alike
    except DslParseException as ex:
        # parsing failed - clean up the extracted application directory
        shutil.rmtree(os.path.join(file_server_root, application_dir))
        raise manager_exceptions.InvalidBlueprintError(
            'Invalid blueprint - {0}'.format(ex.args))
def get(self, blueprint_id):
    """Download a blueprint's archive via nginx's X-Accel-Redirect."""
    # Make sure the blueprint exists before serving anything.
    get_blueprints_manager().get_blueprint(blueprint_id, {'id'})

    blueprint_path = '{0}/{1}/{2}/{2}.tar.gz'.format(
        config.instance().file_server_resources_uri,
        config.instance().file_server_uploaded_blueprints_folder,
        blueprint_id)
    local_path = os.path.join(
        config.instance().file_server_root,
        config.instance().file_server_uploaded_blueprints_folder,
        blueprint_id,
        '%s.tar.gz' % blueprint_id)

    response = make_response()
    headers = response.headers
    headers['Content-Description'] = 'File Transfer'
    headers['Cache-Control'] = 'no-cache'
    headers['Content-Type'] = 'application/octet-stream'
    headers['Content-Disposition'] = \
        'attachment; filename=%s.tar.gz' % blueprint_id
    headers['Content-Length'] = os.path.getsize(local_path)
    # let nginx serve the actual bytes
    headers['X-Accel-Redirect'] = blueprint_path
    headers['X-Accel-Buffering'] = 'yes'
    return response
def init_secured_app(_app):
    """Wire flask-securest security into the given flask app."""
    conf = config.instance()

    if conf.security_auth_token_generator:
        register_auth_token_generator(
            _app, config.instance().security_auth_token_generator)

    # init and configure flask-securest
    securest_app = SecuREST(_app)
    securest_app.logger = create_logger(
        logger_name='flask-securest',
        log_level=conf.security_audit_log_level,
        log_file=conf.security_audit_log_file,
        log_file_size_MB=conf.security_audit_log_file_size_MB,
        log_files_backup_count=conf.security_audit_log_files_backup_count)

    if conf.security_userstore_driver:
        register_userstore_driver(securest_app,
                                  conf.security_userstore_driver)

    register_authentication_providers(
        securest_app, conf.security_authentication_providers)

    def unauthorized_user_handler():
        # hide internal details from unauthenticated callers
        utils.abort_error(
            manager_exceptions.UnauthorizedError('user unauthorized'),
            current_app.logger,
            hide_server_message=True)

    securest_app.unauthorized_user_handler = unauthorized_user_handler
    securest_app.request_security_bypass_handler = \
        request_security_bypass_handler
def init_secured_app(_app):
    """Wire flask-securest into the given flask app per manager config."""
    conf = config.instance()

    if conf.auth_token_generator:
        register_auth_token_generator(_app,
                                      config.instance().auth_token_generator)

    # init and configure flask-securest
    securest_app = SecuREST(_app)
    securest_app.logger = create_logger(
        logger_name='flask-securest',
        log_level=conf.securest_log_level,
        log_file=conf.securest_log_file,
        log_file_size_MB=conf.securest_log_file_size_MB,
        log_files_backup_count=conf.securest_log_files_backup_count)

    if conf.securest_userstore_driver:
        register_userstore_driver(securest_app,
                                  conf.securest_userstore_driver)

    register_authentication_providers(
        securest_app, conf.securest_authentication_providers)

    def unauthorized_user_handler():
        # hide internal details from unauthenticated callers
        utils.abort_error(
            manager_exceptions.UnauthorizedError('user unauthorized'),
            current_app.logger,
            hide_server_message=True)

    securest_app.unauthorized_user_handler = unauthorized_user_handler

    if config.instance().security_bypass_port:
        # a designated internal port may bypass security checks
        securest_app.request_security_bypass_handler = \
            request_security_bypass_handler
def _prepare_and_submit_blueprint(self, file_server_root, application_dir,
                                  blueprint_id):
    """Parse and publish an uploaded blueprint, then relocate its directory.

    :param file_server_root: root directory of the manager's file server
    :param application_dir: directory (relative to the root) holding the
        extracted blueprint application
    :param blueprint_id: id under which the blueprint is published
    :return: the published blueprint model
    :raises manager_exceptions.InvalidBlueprintError: when DSL parsing fails
    """
    application_file = self._extract_application_file(
        file_server_root, application_dir)
    file_server_base_url = config.instance().file_server_base_uri
    dsl_path = '{0}/{1}'.format(file_server_base_url, application_file)
    alias_mapping = '{0}/{1}'.format(file_server_base_url,
                                     'cloudify/alias-mappings.yaml')
    resources_base = file_server_base_url + '/'
    # add to blueprints manager (will also dsl_parse it)
    try:
        blueprint = get_blueprints_manager().publish_blueprint(
            dsl_path, alias_mapping, resources_base, blueprint_id)
        # moving the app directory in the file server to be under a
        # directory named after the blueprint id
        shutil.move(
            os.path.join(file_server_root, application_dir),
            os.path.join(file_server_root,
                         config.instance().file_server_blueprints_folder,
                         blueprint.id))
        self._process_plugins(file_server_root, blueprint.id)
        return blueprint
    # 'except X as ex' replaces the legacy 'except X, ex' form; it works
    # on python 2.6+ and python 3 alike
    except DslParseException as ex:
        # parsing failed - clean up the extracted application directory
        shutil.rmtree(os.path.join(file_server_root, application_dir))
        raise manager_exceptions.InvalidBlueprintError(
            'Invalid blueprint - {0}'.format(ex.args))
def get(self, blueprint_id, **kwargs):
    """Download a blueprint's archive, whatever its archive type is."""
    # Verify blueprint exists.
    get_blueprints_manager().get_blueprint(blueprint_id, {"id"})

    # attempting to find the archive file on the file system
    archive_type = None
    local_path = None
    for arc_type in SUPPORTED_ARCHIVE_TYPES:
        candidate = os.path.join(
            config.instance().file_server_root,
            config.instance().file_server_uploaded_blueprints_folder,
            blueprint_id,
            "{0}.{1}".format(blueprint_id, arc_type))
        if os.path.isfile(candidate):
            archive_type = arc_type
            local_path = candidate
            break
    if archive_type is None:
        raise RuntimeError("Could not find blueprint's archive; "
                           "Blueprint ID: {0}".format(blueprint_id))

    blueprint_path = "{0}/{1}/{2}/{2}.{3}".format(
        config.instance().file_server_resources_uri,
        config.instance().file_server_uploaded_blueprints_folder,
        blueprint_id,
        archive_type)
    return make_streaming_response(blueprint_id,
                                   blueprint_path,
                                   os.path.getsize(local_path),
                                   archive_type)
def get_connection():
    """Return a (cached) connection to the manager's Elasticsearch."""
    if "es_connection" not in g:
        # first use in this app context - build and cache the client
        g.es_connection = elasticsearch.Elasticsearch(
            hosts=[{"host": config.instance().db_address,
                    "port": config.instance().db_port}])
    return g.es_connection
def get_connection():
    """Return a connection to Cloudify manager's Elasticsearch."""
    extensions = current_app.extensions
    if 'es_connection' not in extensions:
        # first use for this app - build and cache the client
        es_host = config.instance().db_address
        es_port = config.instance().db_port
        extensions['es_connection'] = elasticsearch.Elasticsearch(
            hosts=[{"host": es_host, "port": es_port}])
    return extensions['es_connection']
def get_connection():
    """Return a connection to Cloudify manager's Elasticsearch."""
    if 'es_connection' not in g:
        # first use in this app context - build and cache the client
        host_spec = {"host": config.instance().db_address,
                     "port": config.instance().db_port}
        g.es_connection = elasticsearch.Elasticsearch(hosts=[host_spec])
    return g.es_connection
def _get_rest_credentials():
    """Return credentials for calling back into the (secured) REST service."""
    if not current_app.config.get(SECURED_MODE):
        # security is off - no credentials needed
        return {}
    if hasattr(current_app, 'auth_token_generator'):
        # token auth is preferred when a generator is registered
        token = current_app.auth_token_generator.generate_auth_token()
        return {'rest_token': token}
    return {'rest_username': config.instance().security_rest_username,
            'rest_password': config.instance().security_rest_password}
def get(self, snapshot_id):
    """Stream a snapshot's zip archive, refusing failed snapshots."""
    snap = get_blueprints_manager().get_snapshot(snapshot_id)
    if snap.status == models.Snapshot.FAILED:
        raise manager_exceptions.SnapshotActionError(
            'Failed snapshot cannot be downloaded')

    archive_name = '{0}.zip'.format(snapshot_id)
    snapshot_path = os.path.join(_get_snapshot_path(snapshot_id),
                                 archive_name)
    snapshot_uri = '{0}/{1}/{2}/{2}.zip'.format(
        config.instance().file_server_resources_uri,
        config.instance().file_server_snapshots_folder,
        snapshot_id)
    return make_streaming_response(snapshot_id,
                                   snapshot_uri,
                                   os.path.getsize(snapshot_path),
                                   'zip')
def get(self, plugin_id, **kwargs):
    """Download a plugin's archive."""
    # Verify plugin exists; this also gives us the archive's file name.
    plugin = get_blueprints_manager().get_plugin(plugin_id)
    archive_name = plugin.archive_name

    # attempting to find the archive file on the file system
    local_path = _get_plugin_archive_path(plugin_id, archive_name)
    if not os.path.isfile(local_path):
        raise RuntimeError("Could not find plugins archive; "
                           "Plugin ID: {0}".format(plugin_id))

    plugin_path = '{0}/{1}/{2}/{3}'.format(
        config.instance().file_server_resources_uri,
        'plugins',
        plugin_id,
        archive_name)
    return make_streaming_response(plugin_id,
                                   plugin_path,
                                   os.path.getsize(local_path),
                                   'tar.gz')
def setUp(self):
    """Start a file server and a freshly configured REST server for a test."""
    self.tmpdir = tempfile.mkdtemp()
    self.rest_service_log = tempfile.mkstemp()[1]
    self.securest_log_file = tempfile.mkstemp()[1]
    self.file_server = FileServer(self.tmpdir)
    self.maintenance_mode_dir = tempfile.mkdtemp()
    self.addCleanup(self.cleanup)
    self.file_server.start()

    storage_manager.storage_manager_module_name = \
        STORAGE_MANAGER_MODULE_NAME

    # workaround for setting the rest service log path, since it's
    # needed when 'server' module is imported.
    # right after the import the log path is set normally like the rest
    # of the variables (used in the reset_state)
    tmp_conf_file = tempfile.mkstemp()[1]
    # 'with' closes the file handle; the previous open(...) leaked it
    with open(tmp_conf_file, 'w') as f:
        json.dump({'rest_service_log_path': self.rest_service_log,
                   'rest_service_log_file_size_MB': 1,
                   'rest_service_log_files_backup_count': 1,
                   'rest_service_log_level': 'DEBUG'},
                  f)
    os.environ['MANAGER_REST_CONFIG_PATH'] = tmp_conf_file
    try:
        from manager_rest import server
    finally:
        del os.environ['MANAGER_REST_CONFIG_PATH']

    self.server_configuration = self.create_configuration()
    server.reset_state(self.server_configuration)
    utils.copy_resources(config.instance().file_server_root)
    server.setup_app()
    server.app.config['Testing'] = True
    self.app = server.app.test_client()
    self.client = self.create_client()
    self.initialize_provider_context()
def setup_app(warnings=None):
    """Create and fully configure the manager's Flask application.

    :param warnings: optional list of warning strings to log once the
        app logger is configured
    :return: the configured Flask app

    NOTE: the order of operations below matters - handlers saved before
    Api(app) is created are flask's originals, those saved after are
    flask-restful's overrides.
    """
    if warnings is None:
        warnings = []

    app = Flask(__name__)
    cfy_config = config.instance()

    app.logger_name = 'manager-rest'
    # setting up the app logger with a rotating file handler, in addition to
    # the built-in flask logger which can be helpful in debug mode.
    create_logger(logger_name=app.logger.name,
                  log_level=cfy_config.rest_service_log_level,
                  log_file=cfy_config.rest_service_log_path,
                  log_file_size_MB=cfy_config.rest_service_log_file_size_MB,
                  log_files_backup_count=cfy_config.
                  rest_service_log_files_backup_count)

    # log all warnings passed to function
    for w in warnings:
        app.logger.warning(w)

    # secure the app according to manager configuration
    if cfy_config.security_enabled:
        app.logger.info('initializing rest-service security')
        init_secured_app(app)

    app.before_request(log_request)
    app.after_request(log_response)

    # saving flask's original error handlers
    flask_handle_exception = app.handle_exception
    flask_handle_user_exception = app.handle_user_exception

    api = Api(app)

    # saving flask-restful's error handlers
    flask_restful_handle_exception = app.handle_exception
    flask_restful_handle_user_exception = app.handle_user_exception

    # setting it so that <500 codes use flask-restful's error handlers,
    # while 500+ codes use original flask's error handlers (for which we
    # register an error handler on somewhere else in this module)
    def handle_exception(flask_method, flask_restful_method, e):
        code = getattr(e, 'code', 500)
        if code >= 500:
            return flask_method(e)
        else:
            return flask_restful_method(e)

    app.handle_exception = functools.partial(
        handle_exception,
        flask_handle_exception,
        flask_restful_handle_exception)
    app.handle_user_exception = functools.partial(
        handle_exception,
        flask_handle_user_exception,
        flask_restful_handle_user_exception)

    endpoint_mapper.setup_resources(api)
    return app
def celery_client():
    """Return a celery client; a mock one when running in test mode."""
    if not config.instance().test_mode:
        return CeleryClient()
    # test mode - only import the mock when actually needed
    from test.mocks import MockCeleryClient
    return MockCeleryClient()
def setUp(self):
    """Start a file server and a freshly configured REST server for a test."""
    self.tmpdir = tempfile.mkdtemp()
    self.rest_service_log = tempfile.mkstemp()[1]
    self.securest_log_file = tempfile.mkstemp()[1]
    self.file_server = FileServer(self.tmpdir)
    self.addCleanup(self.cleanup)
    self.file_server.start()

    storage_manager.storage_manager_module_name = \
        STORAGE_MANAGER_MODULE_NAME

    # workaround for setting the rest service log path, since it's
    # needed when 'server' module is imported.
    # right after the import the log path is set normally like the rest
    # of the variables (used in the reset_state)
    tmp_conf_file = tempfile.mkstemp()[1]
    # 'with' closes the file handle; the previous open(...) leaked it
    with open(tmp_conf_file, 'w') as f:
        json.dump({'rest_service_log_path': self.rest_service_log,
                   'rest_service_log_file_size_MB': 1,
                   'rest_service_log_files_backup_count': 1,
                   'rest_service_log_level': 'DEBUG'},
                  f)
    os.environ['MANAGER_REST_CONFIG_PATH'] = tmp_conf_file
    try:
        from manager_rest import server
    finally:
        del os.environ['MANAGER_REST_CONFIG_PATH']

    server.reset_state(self.create_configuration())
    utils.copy_resources(config.instance().file_server_root)
    server.setup_app()
    server.app.config['Testing'] = True
    self.app = server.app.test_client()
    self.client = self.create_client()
    self.initialize_provider_context()
def setup_app(warnings=None):
    """Create and fully configure the manager's Flask application.

    :param warnings: optional list of warning strings to log once the
        app logger is configured
    :return: the configured Flask app

    NOTE: handler-saving order below matters - handlers captured before
    Api(app) are flask's originals, those captured after are
    flask-restful's overrides.
    """
    if warnings is None:
        warnings = []

    app = Flask(__name__)
    cfy_config = config.instance()
    _detect_debug_environment()

    app.logger_name = 'manager-rest'
    # setting up the app logger with a rotating file handler, in addition to
    # the built-in flask logger which can be helpful in debug mode.
    create_logger(
        logger_name=app.logger.name,
        log_level=cfy_config.rest_service_log_level,
        log_file=cfy_config.rest_service_log_path,
        log_file_size_MB=cfy_config.rest_service_log_file_size_MB,
        log_files_backup_count=cfy_config.rest_service_log_files_backup_count)

    # log all warnings passed to function
    for w in warnings:
        app.logger.warning(w)

    # secure the app according to manager configuration
    if cfy_config.security_enabled:
        app.logger.info('initializing rest-service security')
        init_secured_app(app)

    app.before_request(log_request)
    app.after_request(log_response)

    # saving flask's original error handlers
    flask_handle_exception = app.handle_exception
    flask_handle_user_exception = app.handle_user_exception

    api = Api(app)

    # saving flask-restful's error handlers
    flask_restful_handle_exception = app.handle_exception
    flask_restful_handle_user_exception = app.handle_user_exception

    # setting it so that <500 codes use flask-restful's error handlers,
    # while 500+ codes use original flask's error handlers (for which we
    # register an error handler on somewhere else in this module)
    def handle_exception(flask_method, flask_restful_method, e):
        code = getattr(e, 'code', 500)
        if code >= 500:
            return flask_method(e)
        else:
            return flask_restful_method(e)

    app.handle_exception = functools.partial(handle_exception,
                                             flask_handle_exception,
                                             flask_restful_handle_exception)
    app.handle_user_exception = functools.partial(
        handle_exception,
        flask_handle_user_exception,
        flask_restful_handle_user_exception)

    endpoint_mapper.setup_resources(api)
    return app
def setUp(self):
    """Start a local file server and initialize the REST server app."""
    self.tmpdir = tempfile.mkdtemp()
    self.file_server = FileServer(self.tmpdir)
    self.file_server.start()

    # point the storage layer at the test storage-manager implementation
    storage_manager.storage_manager_module_name = \
        STORAGE_MANAGER_MODULE_NAME

    server.reset_state(self.create_configuration())
    util.copy_resources(config.instance().file_server_root)
    server.setup_app()
    server.app.config['Testing'] = True
    self.app = server.app.test_client()
def get(self, snapshot_id):
    """Stream a snapshot's zip archive."""
    # ensure the snapshot exists (raises otherwise)
    get_blueprints_manager().get_snapshot(snapshot_id)

    archive_name = '{0}.zip'.format(snapshot_id)
    snapshot_path = os.path.join(_get_snapshot_path(snapshot_id),
                                 archive_name)
    snapshot_uri = '{0}/{1}/{2}/{2}.zip'.format(
        config.instance().file_server_resources_uri,
        config.instance().file_server_snapshots_folder,
        snapshot_id)
    return make_streaming_response(snapshot_id, snapshot_uri,
                                   os.path.getsize(snapshot_path), 'zip')
def delete(self, blueprint_id, **kwargs):
    """Delete a blueprint by id, together with its file-server resources.

    Note: deletion fails when a deployment for the blueprint exists;
    concurrent delete attempts are not guarded against at the moment.
    """
    blueprint = get_blueprints_manager().delete_blueprint(blueprint_id)

    # Delete blueprint resources from file server - both the published
    # blueprint folder and the uploaded-archive folder, in that order.
    for folder in (config.instance().file_server_blueprints_folder,
                   config.instance().file_server_uploaded_blueprints_folder):
        shutil.rmtree(os.path.join(config.instance().file_server_root,
                                   folder,
                                   blueprint.id))

    return blueprint, 200
def _prepare_and_submit_blueprint(cls, file_server_root, app_dir,
                                  deployment_id, additional_inputs=None):
    """Stage a deployment update from an extracted application directory.

    Parses the application, stages the deployment update and copies the
    app dir's contents into the deployment's file-server folder
    (overwriting files that already exist there).

    :param file_server_root: root directory of the manager's file server
    :param app_dir: extracted application directory, relative to the root
    :param deployment_id: id of the deployment being updated
    :param additional_inputs: optional dict of extra inputs for the update
    :return: the staged deployment update
    """
    app_dir, app_file_name = \
        cls._extract_application_file(file_server_root, app_dir)

    # add to deployment update manager (will also dsl_parse it)
    try:
        cls._process_plugins(file_server_root, app_dir, deployment_id)
        update = get_deployment_updates_manager().stage_deployment_update(
            deployment_id,
            app_dir,
            app_file_name,
            additional_inputs=additional_inputs or {})

        # Moving the contents of the app dir to the dest dir, while
        # overwriting any file encountered

        # create the destination root dir
        file_server_deployment_root = \
            os.path.join(file_server_root,
                         config.instance().file_server_deployments_folder,
                         deployment_id)

        app_root_dir = os.path.join(file_server_root, app_dir)

        for root, dirs, files in os.walk(app_root_dir):
            # Creates a corresponding dir structure in the deployment dir
            dest_rel_dir = os.path.relpath(root, app_root_dir)
            dest_dir = os.path.abspath(
                os.path.join(file_server_deployment_root, dest_rel_dir))
            utils.mkdirs(dest_dir)

            # 'root' already is the full path of the walked directory;
            # the previous os.path.join(file_server_root, app_dir, root)
            # only "worked" because joining onto an absolute 'root'
            # discards the earlier components, and broke outright for a
            # relative file_server_root.
            for file_name in files:
                source_file = os.path.join(root, file_name)
                relative_dest_path = os.path.relpath(source_file,
                                                     app_root_dir)
                dest_file = os.path.join(file_server_deployment_root,
                                         relative_dest_path)
                shutil.copy(source_file, dest_file)
        return update
    except Exception:
        # clean the extracted app dir on any failure, then re-raise
        shutil.rmtree(os.path.join(file_server_root, app_dir))
        raise
def setup_app(): app = Flask(__name__) # setting up the app logger with a rotating file handler, in addition to # the built-in flask logger which can be helpful in debug mode. additional_log_handlers = [ RotatingFileHandler( config.instance().rest_service_log_path, maxBytes=1024*1024*100, backupCount=20) ] app.logger_name = 'manager-rest' setup_logger(logger_name=app.logger.name, logger_level=logging.DEBUG, handlers=additional_log_handlers, remove_existing_handlers=False) app.before_request(log_request) app.after_request(log_response) # saving flask's original error handlers flask_handle_exception = app.handle_exception flask_handle_user_exception = app.handle_user_exception api = Api(app) # saving flask-restful's error handlers flask_restful_handle_exception = app.handle_exception flask_restful_handle_user_exception = app.handle_user_exception # setting it so that <500 codes use flask-restful's error handlers, # while 500+ codes use original flask's error handlers (for which we # register an error handler on somewhere else in this module) def handle_exception(flask_method, flask_restful_method, e): code = getattr(e, 'code', 500) if code >= 500: return flask_method(e) else: return flask_restful_method(e) app.handle_exception = functools.partial( handle_exception, flask_handle_exception, flask_restful_handle_exception) app.handle_user_exception = functools.partial( handle_exception, flask_handle_user_exception, flask_restful_handle_user_exception) resources.setup_resources(api) return app
def delete(self, blueprint_id):
    """Delete a blueprint by id and remove its file-server folders.

    Note: deletion fails when a deployment for the blueprint exists;
    concurrent delete attempts are not guarded against at the moment.
    """
    blueprint = get_blueprints_manager().delete_blueprint(blueprint_id)

    # Delete blueprint resources from file server - both the published
    # blueprint folder and the uploaded-archive folder, in that order.
    for folder in (config.instance().file_server_blueprints_folder,
                   config.instance().file_server_uploaded_blueprints_folder):
        shutil.rmtree(os.path.join(config.instance().file_server_root,
                                   folder,
                                   blueprint.id))

    return responses.BlueprintState(**blueprint.to_dict()), 200
def load_configuration():
    """Load REST service configuration from files pointed to by env vars.

    Each recognized key in the yaml file overrides the matching attribute
    on the configuration singleton; unknown keys are silently ignored.
    """
    obj_conf = config.instance()

    def load_config(env_var_name):
        if env_var_name in os.environ:
            with open(os.environ[env_var_name]) as f:
                # safe_load: a config file must not be able to construct
                # arbitrary python objects, which yaml.load allows
                yaml_conf = yaml.safe_load(f.read())
            for key, value in yaml_conf.iteritems():
                if hasattr(obj_conf, key):
                    setattr(obj_conf, key, value)

    load_config('MANAGER_REST_CONFIG_PATH')
    load_config('MANAGER_REST_SECURITY_CONFIG_PATH')
def _prepare_and_submit_blueprint(cls, file_server_root, app_dir,
                                  deployment_id, additional_inputs=None):
    """Stage a deployment update from an extracted application directory.

    Parses the application, stages the deployment update and copies the
    app dir's contents into the deployment's file-server folder
    (overwriting files that already exist there).

    :param file_server_root: root directory of the manager's file server
    :param app_dir: extracted application directory, relative to the root
    :param deployment_id: id of the deployment being updated
    :param additional_inputs: optional dict of extra inputs for the update
    :return: the staged deployment update
    """
    app_dir, app_file_name = \
        cls._extract_application_file(file_server_root, app_dir)

    # add to deployment update manager (will also dsl_parse it)
    try:
        cls._process_plugins(file_server_root, app_dir, deployment_id)
        update = get_deployment_updates_manager().stage_deployment_update(
            deployment_id,
            app_dir,
            app_file_name,
            additional_inputs=additional_inputs or {})

        # Moving the contents of the app dir to the dest dir, while
        # overwriting any file encountered

        # create the destination root dir
        file_server_deployment_root = \
            os.path.join(file_server_root,
                         config.instance().file_server_deployments_folder,
                         deployment_id)

        app_root_dir = os.path.join(file_server_root, app_dir)

        for root, dirs, files in os.walk(app_root_dir):
            # Creates a corresponding dir structure in the deployment dir
            dest_rel_dir = os.path.relpath(root, app_root_dir)
            dest_dir = os.path.abspath(
                os.path.join(file_server_deployment_root, dest_rel_dir))
            utils.mkdirs(dest_dir)

            # 'root' already is the full path of the walked directory;
            # the previous os.path.join(file_server_root, app_dir, root)
            # only "worked" because joining onto an absolute 'root'
            # discards the earlier components, and broke outright for a
            # relative file_server_root.
            for file_name in files:
                source_file = os.path.join(root, file_name)
                relative_dest_path = os.path.relpath(
                    source_file, app_root_dir)
                dest_file = os.path.join(file_server_deployment_root,
                                         relative_dest_path)
                shutil.copy(source_file, dest_file)
        return update
    except Exception:
        # clean the extracted app dir on any failure, then re-raise
        shutil.rmtree(os.path.join(file_server_root, app_dir))
        raise
def stage_deployment_update(self, deployment_id, app_dir, app_blueprint, additional_inputs): """Stage a deployment update :param app_blueprint: :param app_dir: :param deployment_id: the deployment id for the update :return: """ # enables reverting to original blueprint resources deployment = self.sm.get_deployment(deployment_id) blueprint_id = deployment.blueprint_id # enables reverting to original blueprint resources file_server_base_url = \ '{0}/'.format(config.instance().file_server_base_uri) blueprint_resource_dir = os.path.join(file_server_base_url, 'blueprints', blueprint_id) app_path = os.path.join(file_server_base_url, app_dir, app_blueprint) # parsing the blueprint from here try: plan = tasks.parse_dsl( app_path, resources_base_url=file_server_base_url, additional_resources=[blueprint_resource_dir], **app_context.get_parser_context()) except parser_exceptions.DSLParsingException as ex: raise manager_exceptions.InvalidBlueprintError( 'Invalid blueprint - {0}'.format(ex)) # Updating the new inputs with the deployment inputs # (overriding old values and adding new ones) inputs = copy.deepcopy(deployment.inputs) inputs.update(additional_inputs) # applying intrinsic functions try: prepared_plan = tasks.prepare_deployment_plan(plan, inputs=inputs) except parser_exceptions.MissingRequiredInputError, e: raise manager_exceptions.MissingRequiredDeploymentInputError( str(e))
def _prepare_and_submit_blueprint(cls, file_server_root, app_dir,
                                  blueprint_id):
    """Publish an uploaded blueprint, then move its dir under its id.

    :param file_server_root: root directory of the manager's file server
    :param app_dir: extracted application directory, relative to the root
    :param blueprint_id: id under which the blueprint is published
    :return: the published blueprint model
    :raises manager_exceptions.InvalidBlueprintError: when DSL parsing fails
    """
    app_dir, app_file_name = cls._extract_application_file(
        file_server_root, app_dir)
    file_server_base_url = "{0}/".format(
        config.instance().file_server_base_uri)
    # add to blueprints manager (will also dsl_parse it)
    try:
        blueprint = get_blueprints_manager().publish_blueprint(
            app_dir, app_file_name, file_server_base_url, blueprint_id)
        # moving the app directory in the file server to be under a
        # directory named after the blueprint id
        shutil.move(
            os.path.join(file_server_root, app_dir),
            os.path.join(file_server_root,
                         config.instance().file_server_blueprints_folder,
                         blueprint.id))
        cls._process_plugins(file_server_root, blueprint.id)
        return blueprint
    # 'except X as ex' replaces the legacy 'except X, ex' form; it works
    # on python 2.6+ and python 3 alike
    except DslParseException as ex:
        shutil.rmtree(os.path.join(file_server_root, app_dir))
        raise manager_exceptions.InvalidBlueprintError(
            "Invalid blueprint - {0}".format(ex.message))
def do_request(self, blueprint_id):
    """Handle a blueprint upload: save, extract, publish, then archive."""
    fs_root = config.instance().file_server_root
    tmp_archive_path = tempfile.mktemp(dir=fs_root)
    try:
        self._save_file_locally(tmp_archive_path)
        application_dir = self._extract_file_to_file_server(
            fs_root, tmp_archive_path)
        blueprint = self._prepare_and_submit_blueprint(
            fs_root, application_dir, blueprint_id)
        self._move_archive_to_uploaded_blueprints_dir(
            blueprint.id, fs_root, tmp_archive_path)
        return blueprint, 201
    finally:
        # the archive was either moved away or is a leftover - remove it
        if os.path.exists(tmp_archive_path):
            os.remove(tmp_archive_path)
def _move_archive_to_uploaded_blueprints_dir(blueprint_id, file_server_root,
                                             archive_path):
    """Move an uploaded blueprint archive into its permanent directory."""
    if not os.path.exists(archive_path):
        raise RuntimeError("Archive [{0}] doesn't exist - Cannot move "
                           "archive to uploaded blueprints "
                           "directory".format(archive_path))
    target_dir = os.path.join(
        file_server_root,
        config.instance().file_server_uploaded_blueprints_folder,
        blueprint_id)
    os.makedirs(target_dir)
    # the archive is stored as <blueprint_id>.tar.gz inside its own dir
    shutil.move(archive_path,
                os.path.join(target_dir, '{0}.tar.gz'.format(blueprint_id)))
def load_configuration():
    """Populate the configuration singleton from env-pointed yaml files.

    Keys loaded under a namespace get a '<namespace>_' prefix before being
    matched against configuration attributes; unknown keys are ignored.
    """
    obj_conf = config.instance()

    def load_config(env_var_name, namespace=''):
        config_path = os.environ.get(env_var_name)
        if config_path is None:
            return
        with open(config_path) as f:
            yaml_conf = yaml.safe_load(f.read())
        for key, value in yaml_conf.iteritems():
            config_key = '{0}_{1}'.format(namespace, key) if namespace \
                else key
            if hasattr(obj_conf, config_key):
                setattr(obj_conf, config_key, value)

    load_config('MANAGER_REST_CONFIG_PATH')
    load_config('MANAGER_REST_SECURITY_CONFIG_PATH', 'security')
def get(self, snapshot_id):
    """Stream a snapshot's zip archive, refusing failed snapshots."""
    snap = get_blueprints_manager().get_snapshot(snapshot_id)
    if snap.status == models.Snapshot.FAILED:
        raise manager_exceptions.SnapshotActionError(
            'Failed snapshot cannot be downloaded')

    snapshot_path = os.path.join(_get_snapshot_path(snapshot_id),
                                 '{0}.zip'.format(snapshot_id))
    snapshot_uri = '{0}/{1}/{2}/{2}.zip'.format(
        config.instance().file_server_resources_uri,
        config.instance().file_server_snapshots_folder,
        snapshot_id)
    return make_streaming_response(snapshot_id, snapshot_uri,
                                   os.path.getsize(snapshot_path), 'zip')
def receive_uploaded_data(self, data_id):
    """Receive an uploaded archive, process it and store it permanently."""
    fs_root = config.instance().file_server_root
    tmp_archive_path = tempfile.mktemp(dir=fs_root)
    try:
        self._save_file_locally(tmp_archive_path)
        doc, dest_file_name = self._prepare_and_process_doc(
            data_id, fs_root, tmp_archive_path)
        self._move_archive_to_uploaded_dir(doc.id,
                                           fs_root,
                                           tmp_archive_path,
                                           dest_file_name=dest_file_name)
        return doc, 201
    finally:
        # the archive was either moved away or is a leftover - remove it
        if os.path.exists(tmp_archive_path):
            os.remove(tmp_archive_path)
def _get_conf_for_snapshots_wf(self):
    """Build the configuration dict passed to snapshot workflows."""
    # read the configuration singleton once instead of once per key
    cfg = config.instance()
    return {
        'file_server_root': cfg.file_server_root,
        'file_server_snapshots_folder': cfg.file_server_snapshots_folder,
        'file_server_blueprints_folder':
            cfg.file_server_blueprints_folder,
        'file_server_deployments_folder':
            cfg.file_server_deployments_folder,
        'file_server_uploaded_blueprints_folder':
            cfg.file_server_uploaded_blueprints_folder,
        'db_address': cfg.db_address,
        'db_port': cfg.db_port,
        'created_status': models.Snapshot.CREATED,
        'failed_status': models.Snapshot.FAILED,
        'file_server_uploaded_plugins_folder':
            cfg.file_server_uploaded_plugins_folder
    }
def post(self, maintenance_action, **kwargs):
    """Activate or deactivate maintenance mode.

    Returns 304 when the requested state is already in effect.
    """
    maintenance_file_path = get_maintenance_file_path()

    if maintenance_action == 'activate':
        if os.path.isfile(maintenance_file_path):
            # already active / activating
            return {'status': MAINTENANCE_MODE_ACTIVE}, 304
        utils.mkdirs(config.instance().maintenance_folder)
        write_maintenance_state(ACTIVATING_MAINTENANCE_MODE)
        return {'status': ACTIVATING_MAINTENANCE_MODE}

    if maintenance_action == 'deactivate':
        if not os.path.isfile(maintenance_file_path):
            # not in maintenance mode to begin with
            return {'status': NOT_IN_MAINTENANCE_MODE}, 304
        os.remove(maintenance_file_path)
        return {'status': NOT_IN_MAINTENANCE_MODE}
def do_request(self, blueprint_id):
    """Handle a blueprint upload: save, extract, publish, then archive."""
    fs_root = config.instance().file_server_root
    tmp_archive_path = tempfile.mktemp(dir=fs_root)
    try:
        self._save_file_locally(tmp_archive_path)
        app_dir = self._extract_file_to_file_server(fs_root,
                                                    tmp_archive_path)
        blueprint = self._prepare_and_submit_blueprint(fs_root,
                                                       app_dir,
                                                       blueprint_id)
        self._move_archive_to_uploaded_blueprints_dir(blueprint.id,
                                                      fs_root,
                                                      tmp_archive_path)
        return blueprint, 201
    finally:
        # the archive was either moved away or is a leftover - remove it
        if os.path.exists(tmp_archive_path):
            os.remove(tmp_archive_path)
def handle_maintenance_mode(): allowed_endpoints = ['maintenance', 'status', 'version'] # Removing v*/ from the endpoint index = request.endpoint.find('/') request_endpoint = request.endpoint[index+1:] for endpoint in allowed_endpoints: if request_endpoint.startswith(endpoint): return maintenance_file = os.path.join( config.instance().maintenance_folder, MAINTENANCE_MODE_STATUS_FILE) if os.path.isfile(maintenance_file): with open(maintenance_file, 'r') as f: status = f.read() if status == MAINTENANCE_MODE_ACTIVE: return maintenance_mode_error() if status == ACTIVATING_MAINTENANCE_MODE: forbidden_requests = ['POST', 'PATCH', 'PUT'] if request_endpoint == 'snapshots/<string:snapshot_id>': if request.method in forbidden_requests: return activating_maintenance_mode_error() if request_endpoint == 'snapshots/<string:snapshot_id>/restore': return activating_maintenance_mode_error() if request_endpoint == 'executions': if request.method in forbidden_requests: return activating_maintenance_mode_error() if request_endpoint == 'deployments/<string:deployment_id>': if request.method in forbidden_requests: return activating_maintenance_mode_error() if request_endpoint == 'deployment-modifications': if request.method in forbidden_requests: return activating_maintenance_mode_error()
def setUp(self):
    """Start a file server, reset the REST server and wire a mock client."""
    self.tmpdir = tempfile.mkdtemp()
    self.file_server = FileServer(self.tmpdir)
    self.file_server.start()
    storage_manager.storage_manager_module_name = \
        STORAGE_MANAGER_MODULE_NAME
    server.reset_state(self.create_configuration())
    util.copy_resources(config.instance().file_server_root)
    server.setup_app()
    server.app.config['Testing'] = True
    self.app = server.app.test_client()

    # route every client API through the flask test app
    self.client = CloudifyClient('localhost')
    mock_http_client = MockHTTPClient(self.app)
    for api_holder in (self.client.blueprints,
                       self.client.deployments,
                       self.client.executions,
                       self.client.nodes,
                       self.client.node_instances,
                       self.client.manager):
        api_holder.api = mock_http_client
def setUp(self):
    """Start a file server, reset the REST server and wire a mock client."""
    self.tmpdir = tempfile.mkdtemp()
    self.file_server = FileServer(self.tmpdir)
    self.file_server.start()
    storage_manager.storage_manager_module_name = \
        STORAGE_MANAGER_MODULE_NAME
    server.reset_state(self.create_configuration())
    util.copy_resources(config.instance().file_server_root)
    server.setup_app()
    server.app.config['Testing'] = True
    self.app = server.app.test_client()

    # route every client API (including deployment outputs) through the
    # flask test app
    self.client = CloudifyClient('localhost')
    mock_http_client = MockHTTPClient(self.app)
    for api_holder in (self.client.blueprints,
                       self.client.deployments,
                       self.client.deployments.outputs,
                       self.client.executions,
                       self.client.nodes,
                       self.client.node_instances,
                       self.client.manager):
        api_holder.api = mock_http_client
def maintenance_mode_handler():
    """Before-request hook that enforces maintenance mode restrictions."""
    # Routing failed - this request will 404 anyway; nothing to enforce.
    if not request.endpoint:
        return
    # Internal requests may bypass maintenance mode when explicitly allowed.
    if _is_internal_request() and is_bypass_maintenance_mode():
        return

    # Strip the 'v*/' version prefix from the endpoint.
    request_endpoint = request.endpoint[request.endpoint.find('/') + 1:]
    maintenance_file = os.path.join(
        config.instance().maintenance_folder, MAINTENANCE_MODE_STATUS_FILE)
    if not os.path.isfile(maintenance_file):
        return

    state = utils.read_json_file(maintenance_file)
    if state['status'] == MAINTENANCE_MODE_ACTIVATING:
        if get_running_executions():
            # Still draining executions - apply the activating-mode rules.
            return _handle_activating_mode(
                state=state, request_endpoint=request_endpoint)
        # No executions left - flip the persisted state to fully activated.
        state = prepare_maintenance_dict(
            MAINTENANCE_MODE_ACTIVATED,
            activated_at=utils.get_formatted_timestamp(),
            remaining_executions=None,
            requested_by=state['requested_by'],
            activation_requested_at=state['activation_requested_at'])
        utils.write_dict_to_json_file(maintenance_file, state)

    if _check_allowed_endpoint(request_endpoint):
        return
    if state['status'] == MAINTENANCE_MODE_ACTIVATED:
        return _maintenance_mode_error()
def handle_maintenance_mode():
    """Block or allow the current request according to maintenance mode.

    Returns an error response when maintenance mode is active (or
    activating and the request is a blocked write); returns None to let
    the request proceed.
    """
    # Endpoints that must keep working even during maintenance.
    allowed_endpoints = ['maintenance', 'status', 'version']
    # Removing v*/ from the endpoint
    index = request.endpoint.find('/')
    request_endpoint = request.endpoint[index + 1:]
    for endpoint in allowed_endpoints:
        if request_endpoint.startswith(endpoint):
            return

    maintenance_file = os.path.join(config.instance().maintenance_folder,
                                    MAINTENANCE_MODE_STATUS_FILE)
    if not os.path.isfile(maintenance_file):
        return

    with open(maintenance_file, 'r') as f:
        status = f.read()

    if status == MAINTENANCE_MODE_ACTIVE:
        return maintenance_mode_error()
    if status == ACTIVATING_MAINTENANCE_MODE:
        # While activating, write operations on these endpoints are
        # rejected; the endpoint strings are mutually exclusive, so a
        # single membership test replaces the original repeated blocks.
        forbidden_requests = ['POST', 'PATCH', 'PUT']
        write_blocked_endpoints = (
            'snapshots/<string:snapshot_id>',
            'executions',
            'deployments/<string:deployment_id>',
            'deployment-modifications',
        )
        if (request_endpoint in write_blocked_endpoints and
                request.method in forbidden_requests):
            return activating_maintenance_mode_error()
        # Snapshot restore is blocked regardless of the HTTP method.
        if request_endpoint == 'snapshots/<string:snapshot_id>/restore':
            return activating_maintenance_mode_error()
def load_configuration():
    """Load REST-service configuration from YAML files named by env vars.

    Reads the files pointed at by MANAGER_REST_CONFIG_PATH and
    MANAGER_REST_SECURITY_CONFIG_PATH (when set) and applies every known
    key onto the shared config instance; security keys are namespaced
    with a 'security_' prefix.

    :return: list of warning strings for unknown configuration keys.
    """
    obj_conf = config.instance()

    def load_config(env_var_name, namespace=""):
        warnings = []
        if env_var_name in os.environ:
            with open(os.environ[env_var_name]) as f:
                yaml_conf = yaml.safe_load(f.read())
            for key, value in yaml_conf.iteritems():
                config_key = "{0}_{1}".format(namespace, key) \
                    if namespace else key
                if hasattr(obj_conf, config_key):
                    setattr(obj_conf, config_key, value)
                else:
                    # Bug fix: the adjacent literals previously rendered
                    # as "configurationfile" - a space was missing.
                    warnings.append(
                        "Ignoring unknown key '{0}' in configuration "
                        "file '{1}'".format(key, os.environ[env_var_name]))
        return warnings

    warnings = load_config("MANAGER_REST_CONFIG_PATH")
    warnings.extend(load_config("MANAGER_REST_SECURITY_CONFIG_PATH",
                                "security"))
    return warnings
def receive_uploaded_data(self, data_id):
    """Persist an uploaded archive, process it, and return the new doc.

    :param data_id: identifier for the uploaded entity.
    :return: tuple of (created document, 201 status code).
    """
    file_server_root = config.instance().file_server_root
    # NOTE(review): tempfile.mktemp only reserves a name, not the file,
    # so it is race-prone; consider mkstemp. Kept to preserve behavior.
    resource_target_path = tempfile.mktemp(dir=file_server_root)
    try:
        inputs = self._save_file_locally_and_extract_inputs(
            resource_target_path,
            self._get_data_url_key(),
            self._get_kind())
        doc, dest_file_name = self._prepare_and_process_doc(
            data_id,
            file_server_root,
            resource_target_path,
            additional_inputs=inputs)
        self._move_archive_to_uploaded_dir(
            doc.id,
            file_server_root,
            resource_target_path,
            dest_file_name=dest_file_name)
        return doc, 201
    finally:
        # Remove the temporary upload unless it was already moved away.
        if os.path.exists(resource_target_path):
            os.remove(resource_target_path)
def maintenance_mode_handler():
    """Before-request hook that enforces maintenance mode restrictions."""
    # Routing failed - this request will 404 anyway; nothing to enforce.
    if not request.endpoint:
        return
    # Internal requests may bypass maintenance mode when explicitly allowed.
    if _is_internal_request() and is_bypass_maintenance_mode():
        return

    # Strip the 'v*/' version prefix from the endpoint.
    request_endpoint = request.endpoint[request.endpoint.find('/') + 1:]
    maintenance_file = os.path.join(config.instance().maintenance_folder,
                                    MAINTENANCE_MODE_STATUS_FILE)
    if not os.path.isfile(maintenance_file):
        return

    state = utils.read_json_file(maintenance_file)
    if state['status'] == MAINTENANCE_MODE_ACTIVATING:
        if get_running_executions():
            # Still draining executions - apply the activating-mode rules.
            return _handle_activating_mode(
                state=state, request_endpoint=request_endpoint)
        # No executions left - flip the persisted state to fully activated.
        state = prepare_maintenance_dict(
            MAINTENANCE_MODE_ACTIVATED,
            activated_at=str(datetime.now()),
            remaining_executions=None,
            requested_by=state['requested_by'],
            activation_requested_at=state['activation_requested_at'])
        utils.write_dict_to_json_file(maintenance_file, state)

    if _check_allowed_endpoint(request_endpoint):
        return
    if state['status'] == MAINTENANCE_MODE_ACTIVATED:
        return _maintenance_mode_error()
def load_configuration():
    """Load REST-service configuration from YAML files named by env vars.

    Reads the files pointed at by MANAGER_REST_CONFIG_PATH and
    MANAGER_REST_SECURITY_CONFIG_PATH (when set) and applies every known
    key onto the shared config instance; security keys are namespaced
    with a 'security_' prefix.

    :return: list of warning strings for unknown configuration keys.
    """
    obj_conf = config.instance()

    def load_config(env_var_name, namespace=''):
        warnings = []
        if env_var_name in os.environ:
            with open(os.environ[env_var_name]) as f:
                yaml_conf = yaml.safe_load(f.read())
            for key, value in yaml_conf.iteritems():
                config_key = '{0}_{1}'.format(namespace, key) if namespace \
                    else key
                if hasattr(obj_conf, config_key):
                    setattr(obj_conf, config_key, value)
                else:
                    # Bug fix: the adjacent literals previously rendered
                    # as "configurationfile" - a space was missing.
                    warnings.append(
                        "Ignoring unknown key '{0}' in configuration "
                        "file '{1}'".format(key, os.environ[env_var_name]))
        return warnings

    warnings = load_config('MANAGER_REST_CONFIG_PATH')
    warnings.extend(
        load_config('MANAGER_REST_SECURITY_CONFIG_PATH', 'security'))
    return warnings
def post(self, maintenance_action, **_):
    """Activate or deactivate maintenance mode.

    :param maintenance_action: either 'activate' or 'deactivate'.
    :return: the resulting maintenance-state dict; with status 304 when
        the manager is already in the requested state.
    :raises BadParametersError: for any other action value.
    """
    maintenance_file_path = get_maintenance_file_path()

    if maintenance_action == 'activate':
        # Already activating/activated - just report the current state.
        if os.path.isfile(maintenance_file_path):
            return utils.read_json_file(maintenance_file_path), 304

        try:
            user = rest_security.get_username()
        except AttributeError:
            # Security is disabled - no authenticated user available.
            user = ''
        remaining_executions = get_running_executions()

        utils.mkdirs(config.instance().maintenance_folder)
        new_state = prepare_maintenance_dict(
            status=MAINTENANCE_MODE_ACTIVATING,
            activation_requested_at=str(datetime.now()),
            remaining_executions=remaining_executions,
            requested_by=user)
        utils.write_dict_to_json_file(maintenance_file_path, new_state)
        return new_state

    if maintenance_action == 'deactivate':
        if not os.path.isfile(maintenance_file_path):
            return prepare_maintenance_dict(
                MAINTENANCE_MODE_DEACTIVATED), 304
        os.remove(maintenance_file_path)
        return prepare_maintenance_dict(MAINTENANCE_MODE_DEACTIVATED)

    raise BadParametersError(
        'Invalid action: {0}, Valid action values are: {1}'.format(
            maintenance_action, ['activate', 'deactivate']))
def __init__(self):
    """Create a Celery app configured from the manager's AMQP settings."""
    ssl_settings = self._get_broker_ssl_settings(
        ssl_enabled=config.instance().amqp_ssl_enabled,
        cert_path=config.instance().amqp_ca_path,
    )
    # The configured address already includes the port and vhost, so the
    # URI needs no explicit port component.
    amqp_uri = 'amqp://{username}:{password}@{address}'.format(
        username=config.instance().amqp_username,
        password=config.instance().amqp_password,
        address=config.instance().amqp_address,
    )
    self.celery = Celery(broker=amqp_uri, backend=amqp_uri)
    self.celery.conf.update(CELERY_TASK_SERIALIZER="json",
                            CELERY_TASK_RESULT_EXPIRES=600)
    if config.instance().amqp_ssl_enabled:
        self.celery.conf.update(BROKER_USE_SSL=ssl_settings)
def get_client():
    """Return a Celery client; a mock is used when running in test mode."""
    if config.instance().test_mode:
        from test.mocks import MockCeleryClient
        return MockCeleryClient()
    return CeleryClient()
def _get_plugin_archive_path(plugin_id, archive_name):
    """Build the file-server path of an uploaded plugin archive."""
    plugins_folder = config.instance().file_server_uploaded_plugins_folder
    return os.path.join(plugins_folder, plugin_id, archive_name)
def _get_target_dir_path(self):
    """Return the file-server folder that holds snapshots."""
    conf = config.instance()
    return conf.file_server_snapshots_folder
def _get_target_dir_path(self):
    """Return the file-server folder that holds uploaded plugins."""
    conf = config.instance()
    return conf.file_server_uploaded_plugins_folder
def create():
    """Instantiate an Elasticsearch storage manager from the DB config."""
    conf = config.instance()
    return ESStorageManager(conf.db_address, conf.db_port)
def _get_snapshot_path(snapshot_id):
    """Build the absolute file-server directory path for a snapshot."""
    conf = config.instance()
    return os.path.join(conf.file_server_root,
                        conf.file_server_snapshots_folder,
                        snapshot_id)