def patch(self, node_instance_id, **kwargs):
    """Update a node instance's runtime properties and/or state.

    The request body must carry a ``version`` used for optimistic
    concurrency control; a stale version raises a conflict.
    """
    request_dict = get_json_and_verify_params({'version': {'type': int}})
    if not isinstance(request.json, collections.Mapping):
        raise manager_exceptions.BadParametersError(
            'Request body is expected to be a map containing a "version" '
            'field and optionally "runtimeProperties" and/or "state" '
            'fields')

    # Older clients sent version=0 by default; treat that as version 1.
    version = request_dict['version'] or 1

    sm = get_storage_manager()
    instance = sm.get(models.NodeInstance, node_instance_id, locking=True)
    if instance.version > version:
        raise manager_exceptions.ConflictError(
            'Node instance update conflict [current version={0}, '
            'update version={1}]'.format(instance.version, version))

    # Fields absent from the request keep their stored values.
    instance.runtime_properties = request_dict.get(
        'runtime_properties', instance.runtime_properties)
    instance.state = request_dict.get('state', instance.state)
    return sm.update(instance)
def patch(self, snapshot_id):
    """Update a snapshot's status (and optional error) by id."""
    request_dict = rest_utils.get_json_and_verify_params({'status'})
    sm = get_storage_manager()
    snapshot = sm.get(models.Snapshot, snapshot_id)
    snapshot.status = request_dict['status']
    # An omitted error means the status change was clean.
    snapshot.error = request_dict.get('error', '')
    sm.update(snapshot)
def patch(self, key):
    """Update an existing secret's value, visibility or hidden flag."""
    rest_utils.validate_inputs({'key': key})
    if not request.json:
        raise IllegalActionError('Update a secret request must include at '
                                 'least one parameter to update')
    sm = get_storage_manager()
    secret = sm.get(models.Secret, key)
    self._validate_secret_modification_permitted(secret)
    self._update_is_hidden_value(secret)
    self._update_visibility(secret)
    self._update_value(secret)
    secret.updated_at = utils.get_formatted_timestamp()
    return sm.update(secret, validate_global=True)
def get(self, plugin_id, **kwargs):
    """Download a plugin's archive file."""
    # Verify the plugin exists before touching the file system.
    plugin = get_storage_manager().get(models.Plugin, plugin_id)
    fs_path = utils.get_plugin_archive_path(plugin_id, plugin.archive_name)
    if not os.path.isfile(fs_path):
        raise RuntimeError("Could not find plugins archive; "
                           "Plugin ID: {0}".format(plugin_id))
    archive_uri = '/'.join([FILE_SERVER_RESOURCES_FOLDER,
                            FILE_SERVER_PLUGINS_FOLDER,
                            plugin_id,
                            plugin.archive_name])
    return rest_utils.make_streaming_response(
        plugin_id, archive_uri, os.path.getsize(fs_path), 'tar.gz')
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List deployments."""
    get_all_results = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False)
    )
    result = get_storage_manager().list(
        models.Deployment,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=get_all_results
    )
    if _include and 'workflows' in _include:
        # Because we coerce this into a list in the model, but our ORM
        # won't return a model instance when filtering results, we have
        # to coerce this here as well.
        coerced_items = []
        for item in result.items:
            as_dict = item._asdict()
            as_dict['workflows'] = models.Deployment._list_workflows(
                as_dict['workflows'])
            coerced_items.append(as_dict)
        result.items = coerced_items
    return result
def delete(self, snapshot_id):
    """Delete a snapshot record and best-effort remove its files."""
    sm = get_storage_manager()
    snapshot = sm.get(models.Snapshot, snapshot_id)
    sm.delete(snapshot)
    # ignore_errors: a missing/partially-removed directory is fine here.
    shutil.rmtree(_get_snapshot_path(snapshot_id), ignore_errors=True)
    return snapshot, 200
def put(self, key, **kwargs):
    """Create a secret, or update it when update_if_exists is set.

    A ConflictError on creation falls back to updating the stored
    secret's value; otherwise the conflict propagates.
    """
    secret_params = self._get_secret_params(key)
    encrypted_value = encrypt(secret_params['value'])
    sm = get_storage_manager()
    now = utils.get_formatted_timestamp()
    try:
        return sm.put(models.Secret(
            id=key,
            value=encrypted_value,
            created_at=now,
            updated_at=now,
            visibility=secret_params['visibility'],
            is_hidden_value=secret_params['is_hidden_value'],
        ))
    except ConflictError:
        secret = sm.get(models.Secret, key)
        if secret and secret_params['update_if_exists']:
            secret.value = encrypted_value
            secret.updated_at = now
            return sm.update(secret, validate_global=True)
        raise
def post(self):
    """Add a broker to the database."""
    broker = rest_utils.get_json_and_verify_params({
        'name': {'type': unicode},
        'address': {'type': unicode},
        'port': {'type': int, 'optional': True},
        'networks': {'type': dict, 'optional': True},
    })
    sm = get_storage_manager()
    # Credentials and the CA cert are shared across brokers, so copy
    # them from the first broker already in the DB.
    first_broker = sm.list(models.RabbitMQBroker).items[0]
    networks = broker.get('networks')
    if networks:
        check_private_address_is_in_networks(broker['address'], networks)
    else:
        networks = {'default': broker['address']}
    result = sm.put(models.RabbitMQBroker(
        name=broker['name'],
        host=broker['address'],
        management_host=broker['address'],
        port=broker.get('port'),
        networks=networks,
        username=first_broker.username,
        password=first_broker.password,
        _ca_cert_id=first_broker._ca_cert_id,
    ))
    current_app.logger.info('Broker added successfully')
    update_agents(sm)
    return result
def from_storage(cls, deployment_id, deployment_update):
    """Create a DeploymentPlan from a stored deployment."""
    sm = get_storage_manager()
    if deployment_update:
        # A pending deployment update carries its own deployment and the
        # plan of the blueprint it started from.
        deployment = deployment_update.deployment
        blueprint_plan = deployment_update.old_blueprint.plan
    else:
        deployment = sm.get(models.Deployment, deployment_id)
        blueprint_plan = deployment.blueprint.plan
    node_list = sm.list(models.Node,
                        filters={'deployment_id': [deployment_id]},
                        get_all_results=True)
    nodes = {node.id: node.to_dict() for node in node_list}
    return cls(deployment.to_dict(),
               nodes,
               blueprint_plan['deployment_plugins_to_install'],
               blueprint_plan['workflow_plugins_to_install'])
def get(self, multi_tenancy=None, _include=None, filters=None,
        pagination=None, sort=None, search=None, **kwargs):
    """
    List tenants
    """
    # Returning tenant data requires a stronger permission than a plain
    # listing; the decorated no-op presumably runs that check when
    # called — verify against @authorize's implementation.
    @authorize('tenant_list_get_data')
    def _authorize_with_get_data():
        pass

    if rest_utils.verify_and_convert_bool(
            'get_data', request.args.get('_get_data', False)):
        _authorize_with_get_data()

    get_all_results = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False)
    )
    if multi_tenancy:
        # Premium path: full multi-tenant listing.
        return multi_tenancy.list_tenants(_include, filters, pagination,
                                          sort, search, get_all_results)
    # In community edition we have only the `default_tenant`, so it
    # should be safe to return it like this
    return get_storage_manager().list(models.Tenant)
def get(self, _include=None, **kwargs):
    """List uploaded blueprints."""
    sm = get_storage_manager()
    return sm.list(models.Blueprint, include=_include).items
def get(self, _include=None, **kwargs):
    """List nodes, optionally narrowed by deployment id and node id."""
    args = get_args_and_verify_arguments([
        Argument('deployment_id', required=False),
        Argument('node_id', required=False),
    ])
    deployment_id = args.get('deployment_id')
    node_id = args.get('node_id')
    if deployment_id and node_id:
        # Fully-qualified lookup: a missing node yields an empty list
        # instead of an error response.
        try:
            return [get_node(deployment_id, node_id)]
        except manager_exceptions.NotFoundError:
            return []
    deployment_id_filter = ResourceManager.create_filters_dict(
        deployment_id=deployment_id)
    return get_storage_manager().list(
        models.Node,
        filters=deployment_id_filter,
        include=_include
    ).items
def delete(self, name):
    """Delete an existing site."""
    sm = get_storage_manager()
    return sm.delete(sm.get(models.Site, name), validate_global=True)
def get(self, blueprint_id, **kwargs):
    """Download a blueprint's uploaded archive."""
    blueprint = get_storage_manager().get(models.Blueprint, blueprint_id)
    # The archive may have been uploaded in any supported format; probe
    # the file server for each extension until one is found.
    archive_type = None
    local_path = None
    for arc_type in SUPPORTED_ARCHIVE_TYPES:
        candidate = os.path.join(
            config.instance.file_server_root,
            FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
            blueprint.tenant.name,
            blueprint.id,
            '{0}.{1}'.format(blueprint.id, arc_type))
        if os.path.isfile(candidate):
            archive_type = arc_type
            local_path = candidate
            break
    if archive_type is None:
        raise RuntimeError("Could not find blueprint's archive; "
                           "Blueprint ID: {0}".format(blueprint.id))
    blueprint_path = '{0}/{1}/{2}/{3}/{3}.{4}'.format(
        FILE_SERVER_RESOURCES_FOLDER,
        FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
        blueprint.tenant.name,
        blueprint.id,
        archive_type)
    return make_streaming_response(
        blueprint.id,
        blueprint_path,
        os.path.getsize(local_path),
        archive_type
    )
def _create_agent(self, name, state, request_dict):
    """Store a new Agent row linked to its node instance."""
    now = utils.get_formatted_timestamp()
    rabbitmq_password = request_dict.get('rabbitmq_password')
    if rabbitmq_password:
        rabbitmq_password = encrypt(rabbitmq_password)
    # TODO: remove these fields from the runtime properties
    agent = models.Agent(
        id=name,
        name=name,
        ip=request_dict.get('ip'),
        install_method=request_dict.get('install_method'),
        system=request_dict.get('system'),
        state=state,
        version=request_dict.get('version'),
        rabbitmq_username=request_dict.get('rabbitmq_username'),
        rabbitmq_password=rabbitmq_password,
        rabbitmq_exchange=request_dict.get('rabbitmq_exchange'),
        created_at=now,
        updated_at=now,
    )
    sm = get_storage_manager()
    agent.node_instance = sm.get(
        models.NodeInstance, request_dict.get('node_instance_id'))
    return sm.put(agent)
def put(self, name):
    """
    Create a new agent
    """
    request_dict = get_json_and_verify_params({
        'node_instance_id': {'type': unicode},
        'state': {'type': unicode},
        'create_rabbitmq_user': {'type': bool}
    })
    validate_inputs({'name': name})
    state = request_dict.get('state')
    self._validate_state(state)
    response = {}
    try:
        new_agent = self._create_agent(name, state, request_dict)
        response = new_agent
    except manager_exceptions.ConflictError:
        # Assuming the agent was already created in cases of reinstalling
        # or healing
        # NOTE(review): on conflict the response stays {}; the existing
        # agent is fetched only so the rabbitmq user can be (re)created
        # below — confirm callers rely on the empty response.
        current_app.logger.info("Not creating agent {0} because it "
                                "already exists".format(name))
        new_agent = get_storage_manager().get(models.Agent, name)
    if request_dict.get('create_rabbitmq_user'):
        # Create rabbitmq user
        self._get_amqp_manager().create_agent_user(new_agent)
    return response
def mock_execute_task(execution_id, **_):
    """Test stub: immediately finish the execution with a mocked status."""
    sm = get_storage_manager()
    ex = sm.get(models.Execution, execution_id)
    ex.status = task_state()
    ex.error = ''
    ex.ended_at = utils.get_formatted_timestamp()
    sm.update(ex)
def get(self, pagination=None):
    """List brokers from the database."""
    brokers = get_storage_manager().list(models.RabbitMQBroker)
    # Strip credentials from the response unless the user is allowed to
    # view them.
    if not is_user_action_allowed('broker_credentials'):
        for broker in brokers:
            broker.username = None
            broker.password = None
    return brokers
def get_parser_context(sm=None):
    """Return the DSL parser context, loading and caching it on first use."""
    sm = sm or get_storage_manager()
    if not hasattr(current_app, 'parser_context'):
        provider_context = sm.get(ProviderContext, PROVIDER_CONTEXT_ID)
        update_parser_context(provider_context.context)
    return current_app.parser_context
def patch(self, operation_id, **kwargs):
    """Update an operation's state."""
    request_dict = get_json_and_verify_params({'state': {'type': unicode}})
    sm = get_storage_manager()
    operation = sm.get(models.Operation, operation_id, locking=True)
    # A missing state leaves the stored value untouched.
    operation.state = request_dict.get('state', operation.state)
    return sm.update(operation)
def get_current_execution_by_token(execution_token):
    """Return the single execution matching the hashed token, or None."""
    hashed = hashlib.sha256(execution_token).hexdigest()
    executions = get_storage_manager().full_access_list(
        models.Execution, filters={models.Execution.token: hashed})
    # Only one execution should match the token
    return executions[0] if len(executions) == 1 else None
def _get_items(self, scope=None):
    """Return config entries as dicts, optionally restricted to a scope."""
    filters = {'scope': scope} if scope else None
    listing = get_storage_manager().list(models.Config, filters=filters)
    return [item.to_dict() for item in listing.items]
def delete(self, name):
    """Delete a manager (looked up by hostname) from the database.

    Also removes the manager from the cluster and notifies agents of
    the new manager list.
    """
    sm = get_storage_manager()
    manager_to_delete = sm.get(
        models.Manager,
        None,
        filters={'hostname': name}
    )
    result = sm.delete(manager_to_delete)
    current_app.logger.info('Manager deleted successfully')
    # Reuse the storage manager already in hand instead of fetching a
    # second one (the original called get_storage_manager() again here).
    managers_list = sm.list(models.Manager)
    # Removing manager from cluster
    remove_manager(managers_list)
    update_agents(sm)
    return result
def delete(self, key):
    """Delete a secret by key."""
    rest_utils.validate_inputs({'key': key})
    sm = get_storage_manager()
    secret = sm.get(models.Secret, key)
    self._validate_secret_modification_permitted(secret)
    return sm.delete(secret, validate_global=True)
def get(self, blueprint_id, _include=None, **kwargs):
    """Get blueprint by id."""
    sm = get_storage_manager()
    return sm.get(models.Blueprint, blueprint_id, _include)
def get(self, node_instance_id, _include=None, **kwargs):
    """Get node instance by id."""
    sm = get_storage_manager()
    return sm.get(models.NodeInstance, node_instance_id, include=_include)
def generate_execution_token(execution_id):
    """Create a fresh token for an execution and return its plaintext.

    Only the sha256 digest is persisted; the raw token is handed back
    to the caller and never stored.
    """
    sm = get_storage_manager()
    execution = sm.get(models.Execution, execution_id)
    token = uuid.uuid4().hex
    execution.token = hashlib.sha256(token).hexdigest()
    sm.update(execution)
    return token
def get(self, execution_id, _include=None, **kwargs):
    """Get execution by id."""
    sm = get_storage_manager()
    return sm.get(models.Execution, execution_id, include=_include)
def get(self, user_id):
    """Get an auth token for the user with the given id."""
    user = get_storage_manager().get(models.User, user_id)
    return dict(username=user.username,
                value=user.get_auth_token(),
                role=user.role)
def get(self, deployment_id, _include=None, **kwargs):
    """Get deployment by id."""
    sm = get_storage_manager()
    return sm.get(models.Deployment, deployment_id, include=_include)
def get(self, _include=None, pagination=None, **kwargs):
    """List tasks graphs matching a required execution id and name."""
    args = get_args_and_verify_arguments([
        Argument('execution_id', type=unicode, required=True),
        Argument('name', type=unicode, required=True)
    ])
    sm = get_storage_manager()
    execution_id = args.get('execution_id')
    name = args.get('name')
    # NOTE(review): indexing [0] raises a bare IndexError (HTTP 500)
    # when no execution matches; consider raising a proper not-found
    # error instead.
    execution = sm.list(models.Execution, filters={'id': execution_id})[0]
    return sm.list(models.TasksGraph, filters={
        'execution': execution,
        'name': name
    }, pagination=pagination)
def get(self, _include=None, filters=None, pagination=None,
        sort=None, **kwargs):
    """List executions."""
    deployment_id = request.args.get('deployment_id')
    if deployment_id:
        # Verify the deployment exists; only the id column is fetched.
        get_storage_manager().get(models.Deployment,
                                  deployment_id,
                                  include=['id'])
    include_system_workflows = rest_utils.verify_and_convert_bool(
        '_include_system_workflows',
        request.args.get('_include_system_workflows', 'false'))
    return get_resource_manager().list_executions(
        filters=filters,
        pagination=pagination,
        sort=sort,
        is_include_system_workflows=include_system_workflows,
        include=_include)
def delete(self):
    """Delete a manager, identified by hostname, from the database."""
    params = rest_utils.get_json_and_verify_params(
        {'hostname': {'type': unicode}})
    sm = get_storage_manager()
    manager_to_delete = sm.get(
        models.Manager, None, filters={'hostname': params['hostname']})
    # TODO: send message on service-queue
    return sm.delete(manager_to_delete)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None):
    """List execution groups."""
    fetch_all = verify_and_convert_bool(
        '_get_all_results', request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        models.ExecutionGroup,
        include=_include,
        filters=filters,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def get(self, _include=None, **kwargs):
    """List node instances, optionally filtered by deployment/node name."""
    args = get_args_and_verify_arguments([
        Argument('deployment_id', type=str, required=False),
        Argument('node_name', type=str, required=False)
    ])
    # Note: the query argument is called node_name but it filters the
    # node_id column.
    params_filter = ResourceManager.create_filters_dict(
        deployment_id=args.get('deployment_id'),
        node_id=args.get('node_name'))
    return get_storage_manager().list(models.NodeInstance,
                                      filters=params_filter,
                                      include=_include).items
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, **kwargs):
    """List uploaded vims."""
    current_app.logger.info('get vims list')
    sm = get_storage_manager()
    return sm.list(models.Vim,
                   include=_include,
                   filters=filters,
                   pagination=pagination,
                   sort=sort,
                   all_tenants=all_tenants)
def get(self, key):
    """Get secret by key, decrypting the value when permitted."""
    rest_utils.validate_inputs({'key': key})
    secret = get_storage_manager().get(models.Secret, key)
    secret_dict = secret.to_dict()
    hidden = (secret_dict['is_hidden_value']
              and not self._is_hidden_value_permitted(secret))
    # Hidden values are blanked for callers without permission;
    # otherwise the stored ciphertext is decrypted for the response.
    secret_dict['value'] = '' if hidden else decrypt(secret.value)
    return secret_dict
def get(self, _include=None, filters=None, pagination=None,
        sort=None, **kwargs):
    """List deployment modifications."""
    return get_storage_manager().list_deployment_modifications(
        include=_include,
        filters=filters,
        pagination=pagination,
        sort=sort)
def get(self, snapshot_id):
    """Stream a snapshot archive; failed snapshots cannot be fetched."""
    snapshot = get_storage_manager().get(models.Snapshot, snapshot_id)
    if snapshot.status == SnapshotState.FAILED:
        raise manager_exceptions.SnapshotActionError(
            'Failed snapshot cannot be downloaded')
    local_file = os.path.join(_get_snapshot_path(snapshot_id),
                              '{0}.zip'.format(snapshot_id))
    uri = '{0}/{1}/{2}/{2}.zip'.format(
        FILE_SERVER_RESOURCES_FOLDER,
        FILE_SERVER_SNAPSHOTS_FOLDER,
        snapshot_id)
    return rest_utils.make_streaming_response(
        snapshot_id, uri, os.path.getsize(local_file), 'zip')
def _is_global_blueprint(uri):
    """Check whether a file-server uri points at a global blueprint."""
    blueprint_folders = [FILE_SERVER_UPLOADED_BLUEPRINTS_FOLDER,
                         FILE_SERVER_BLUEPRINTS_FOLDER]
    try:
        resource, tenant, resource_id = uri.split('/')[:3]
    except Exception:
        # in case of different format of file server uri
        return False
    if resource not in blueprint_folders:
        return False
    try:
        visibility = get_storage_manager().get(
            models.Blueprint, resource_id).visibility
    except NotFoundError:
        return False
    return visibility == VisibilityState.GLOBAL
def get(self, _include=None, filters=None, pagination=None,
        sort=None, **kwargs):
    """List node instances."""
    return get_storage_manager().list_node_instances(
        include=_include,
        filters=filters,
        pagination=pagination,
        sort=sort)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List uploaded plugins."""
    sm = get_storage_manager()
    return sm.list(models.Plugin,
                   include=_include,
                   filters=filters,
                   substr_filters=search,
                   pagination=pagination,
                   sort=sort,
                   all_tenants=all_tenants)
def get(self, snapshot_id):
    """Stream a snapshot archive; failed snapshots cannot be fetched."""
    snapshot = get_storage_manager().get_snapshot(snapshot_id)
    if snapshot.status == models.Snapshot.FAILED:
        raise manager_exceptions.SnapshotActionError(
            'Failed snapshot cannot be downloaded')
    local_file = os.path.join(_get_snapshot_path(snapshot_id),
                              '{0}.zip'.format(snapshot_id))
    snapshot_uri = '{0}/{1}/{2}/{2}.zip'.format(
        config.instance.file_server_resources_uri,
        config.instance.file_server_snapshots_folder,
        snapshot_id)
    return make_streaming_response(snapshot_id,
                                   snapshot_uri,
                                   os.path.getsize(local_file),
                                   'zip')
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List snapshots."""
    sm = get_storage_manager()
    return sm.list(models.Snapshot,
                   include=_include,
                   filters=filters,
                   substr_filters=search,
                   pagination=pagination,
                   sort=sort,
                   all_tenants=all_tenants)
def execute_workflow(name,
                     workflow,
                     workflow_plugins,
                     blueprint_id,
                     deployment_id,
                     execution_id,
                     execution_parameters=None,
                     bypass_maintenance=None,
                     dry_run=False,
                     wait_after_fail=600):
    """Send a workflow task for execution.

    Looks up the workflow's plugin among ``workflow_plugins`` and, when
    a managed plugin with the same package name/version exists, copies
    its visibility and tenant name onto the task context before
    dispatching via ``_execute_task``.
    """
    execution_parameters = execution_parameters or {}
    task_name = workflow['operation']
    plugin_name = workflow['plugin']
    # Fix: `[p for p in ...][0]` raised IndexError when no plugin matched,
    # making the `if plugin` guard below unreachable. Default to {} so the
    # guard works and plugin.get(...) in the context stays safe.
    plugin = next(
        (p for p in workflow_plugins if p['name'] == plugin_name), {})
    if plugin and plugin['package_name']:
        sm = get_storage_manager()
        filter_plugin = {
            'package_name': plugin.get('package_name'),
            'package_version': plugin.get('package_version')
        }
        managed_plugins = sm.list(models.Plugin,
                                  filters=filter_plugin).items
        if managed_plugins:
            plugin['visibility'] = managed_plugins[0].visibility
            plugin['tenant_name'] = managed_plugins[0].tenant_name
    context = {
        'type': 'workflow',
        'task_name': task_name,
        'task_id': execution_id,
        'workflow_id': name,
        'blueprint_id': blueprint_id,
        'deployment_id': deployment_id,
        'execution_id': execution_id,
        'bypass_maintenance': bypass_maintenance,
        'dry_run': dry_run,
        'is_system_workflow': False,
        'wait_after_fail': wait_after_fail,
        'plugin': {
            'name': plugin_name,
            'package_name': plugin.get('package_name'),
            'package_version': plugin.get('package_version'),
            'visibility': plugin.get('visibility'),
            'tenant_name': plugin.get('tenant_name')
        }
    }
    return _execute_task(execution_id=execution_id,
                         execution_parameters=execution_parameters,
                         context=context)
def generate_auth_token():
    """Build an API token for the admin user (uid 0)."""
    config.instance.load_from_file(RESTSERVICE_CONFIG_PATH)
    config.instance.rest_service_log_path = '/dev/null'
    app = server.CloudifyFlaskApp()
    try:
        with app.app_context():
            sm = storage.get_storage_manager()
            enc_uid = storage.idencoder.get_encoder().encode(0)
            admin_user = sm.get(storage.models.User, 0)
            return enc_uid + admin_user.api_token_key
    finally:
        # Always restore a pristine config, even on failure.
        config.reset(config.Config())
def cancel_execution(execution):
    """Ask the management worker to cancel a running execution."""
    sm = get_storage_manager()
    rest_hosts = [manager.private_ip for manager in sm.list(models.Manager)]
    _broadcast_mgmtworker_task({
        'service_task': {
            'task_name': 'cancel-workflow',
            'kwargs': {
                'rest_host': rest_hosts,
                'execution_id': execution.id,
                'rest_token': current_user.get_auth_token(),
                'tenant': _get_tenant_dict(),
                'execution_token': generate_execution_token(execution)
            }
        }
    })
def get_labels_keys(resource_model, resource_labels_model, pagination,
                    search):
    """Get all the resource's labels' keys."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results', request.args.get('_get_all_results', False))
    results = get_storage_manager().list(
        resource_labels_model,
        include=['key'],
        pagination=pagination,
        filters={'_labeled_model_fk': resource_model._storage_id},
        get_all_results=fetch_all,
        distinct=['key'],
        substr_filters=search,
        sort={'key': 'asc'})
    # Flatten the single-column rows into a plain list of key strings.
    results.items = [label.key for label in results]
    return results
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, **kwargs):
    """List uploaded blueprints."""
    sm = get_storage_manager()
    return sm.list(models.Blueprint,
                   include=_include,
                   filters=filters,
                   pagination=pagination,
                   sort=sort,
                   all_tenants=all_tenants)
def wrapper(*args, **kwargs):
    """Authorization wrapper: resolve the tenant, check the user, then
    verify the user's roles allow `action` before calling `func`."""
    # getting the tenant name
    if get_tenant_from == 'header':
        tenant_name = tenant_for_auth or request.headers.get(
            CLOUDIFY_TENANT_HEADER)
    elif get_tenant_from == 'param':
        tenant_name = tenant_for_auth or kwargs['tenant_name']
    elif get_tenant_from == 'data':
        tenant_name = tenant_for_auth or get_json_and_verify_params({
            'tenant_name': {
                'type': text_type
            }
        }).get('tenant_name')
    else:
        tenant_name = tenant_for_auth
    # finding tenant to add to the app config
    if tenant_name:
        try:
            tenant = get_storage_manager().get(
                Tenant,
                tenant_name,
                filters={'name': tenant_name})
            utils.set_current_tenant(tenant)
        except NotFoundError:
            # An unknown tenant is reported as Forbidden, not NotFound,
            # to avoid leaking which tenants exist.
            raise ForbiddenError(
                'Authorization failed: Tried to authenticate with '
                'invalid tenant name: {0}'.format(tenant_name))
    if not current_user.active:
        raise ForbiddenError('Authorization failed: '
                             'User `{0}` is deactivated'.format(
                                 current_user.username))
    # when running unittests, there is no authorization
    if config.instance.test_mode:
        return func(*args, **kwargs)
    # checking if any of the user's roles is allowed to perform action
    if is_user_action_allowed(action, tenant_name, allow_all_tenants):
        return func(*args, **kwargs)
    # none of the user's role is allowed to perform the action
    error_message = 'User `{0}` is not permitted to perform the ' \
                    'action {1}'.format(current_user.username, action)
    if tenant_name:
        error_message += ' in the tenant `{0}`'.format(tenant_name)
    raise ForbiddenError(error_message)
def execute_workflow(
    execution,
    bypass_maintenance=None,
    wait_after_fail=600,
    handler: SendHandler = None,
):
    """Publish a workflow execution task to the management worker.

    Renders the execution's context, enriches it with REST endpoints,
    tokens and (when a matching managed plugin exists) plugin
    visibility/tenant data, then publishes via ``handler`` or the
    default mgmtworker channel.
    """
    sm = get_storage_manager()
    token = generate_execution_token(execution)
    context = execution.render_context()
    context.update({
        'wait_after_fail': wait_after_fail,
        'bypass_maintenance': bypass_maintenance,
        'execution_token': token,
        'rest_host': [m.private_ip for m in sm.list(models.Manager)],
        'rest_token': execution.creator.get_auth_token(),
    })
    plugin_ctx = context.get('plugin')
    if plugin_ctx:
        managed = sm.list(models.Plugin, filters={
            'package_name': plugin_ctx.get('package_name'),
            'package_version': plugin_ctx.get('package_version'),
        }).items
        if managed:
            plugin_ctx['visibility'] = managed[0].visibility
            plugin_ctx['tenant_name'] = managed[0].tenant_name
    execution_parameters = execution.parameters.copy()
    execution_parameters['__cloudify_context'] = context
    message = {
        'cloudify_task': {'kwargs': execution_parameters},
        'id': execution.id,
        'execution_creator': execution.creator.id
    }
    if handler is None:
        _send_mgmtworker_task(message)
    else:
        handler.publish(message)
def get(self, plugin_id, **kwargs):
    """Download a plugin's archive."""
    # Verify plugin exists.
    plugin = get_storage_manager().get(models.Plugin, plugin_id)
    archive_uri = '{0}/{1}/{2}/{3}'.format(
        FILE_SERVER_RESOURCES_FOLDER,
        FILE_SERVER_PLUGINS_FOLDER,
        plugin_id,
        plugin.archive_name)
    return rest_utils.make_streaming_response(
        plugin_id, archive_uri, 'tar.gz')
def post(self, **kwargs):
    """
    Upload a plugin
    """
    storage_manager = get_storage_manager()
    is_caravan = False
    installed_plugins = []
    get_resource_manager().assert_no_snapshot_creation_running_or_queued()
    # A caravan is a bundle of several plugins; try that first, and fall
    # back to treating the upload as a single plugin archive.
    try:
        plugins, code = UploadedCaravanManager().receive_uploaded_data(
            **kwargs)
        is_caravan = True
    except UploadedCaravanManager.InvalidCaravanException:
        plugin, code = UploadedPluginsManager().receive_uploaded_data(
            str(uuid4()),
            **kwargs
        )
        plugins = [plugin]
    for plugin in plugins:
        try:
            get_resource_manager().install_plugin(plugin)
            installed_plugins.append(plugin)
        except manager_exceptions.ExecutionTimeout:
            tp, ex, tb = sys.exc_info()
            # For a caravan, a single failed plugin doesn't abort the
            # rest of the bundle.
            if not is_caravan:
                # Python 2 three-expression raise: re-raise the wrapped
                # error with the original traceback.
                raise manager_exceptions.PluginInstallationTimeout(
                    'Timed out during plugin installation.'
                    '({0}: {1})'.format(tp.__name__, ex)), None, tb
        except Exception:
            # Roll back the partially-installed plugin before deciding
            # whether to propagate.
            get_resource_manager().remove_plugin(plugin_id=plugin.id,
                                                 force=True)
            tp, ex, tb = sys.exc_info()
            if not is_caravan:
                raise manager_exceptions.PluginInstallationError(
                    'Failed during plugin installation.'
                    '({0}: {1})'.format(tp.__name__, ex)), None, tb
    if is_caravan:
        # Re-read from storage so the response carries fresh metadata.
        storage_plugins = storage_manager.list(
            models.Plugin,
            filters={'id': [p.id for p in installed_plugins]})
        return ListResponse(items=storage_plugins.items,
                            metadata=storage_plugins.metadata), code
    else:
        return plugins[0], code
def get(self, key):
    """Get secret by key, decrypting the value when permitted."""
    rest_utils.validate_inputs({'key': key})
    secret = get_storage_manager().get(models.Secret, key)
    secret_dict = secret.to_dict()
    if secret_dict['is_hidden_value'] and not \
            self._is_value_permitted(secret_dict['created_by']):
        # The caller may not see hidden values; blank it out.
        secret_dict['value'] = ''
    else:
        # Returns the decrypted value
        secret_dict['value'] = cryptography_utils.decrypt(
            config.instance.security_encryption_key, secret.value)
    return secret_dict
def update_cert(cert_path, name):
    """Replace the named certificate in the DB with the file's contents."""
    with open(cert_path) as cert_file:
        new_value = cert_file.read()
    sm = get_storage_manager()
    instance = sm.get(models.Certificate, None,
                      filters={'name': name}, fail_silently=True)
    if instance and instance.value != new_value:
        instance.value = new_value
        instance.updated_at = datetime.now()
        sm.update(instance)
        print('Replaced cert {0} on DB'.format(name))
        return
    # Either the cert row is missing or it already holds this value.
    print('CA cert {0} was already replaced'.format(name))
def setUp(self):
    """Spin up a local file server and an in-memory REST app for tests."""
    self.tmpdir = tempfile.mkdtemp(prefix='fileserver-')
    fd, self.rest_service_log = tempfile.mkstemp(prefix='rest-log-')
    os.close(fd)
    fd, self.sqlite_db_file = tempfile.mkstemp(prefix='sqlite-db-')
    os.close(fd)
    self.file_server = FileServer(self.tmpdir)
    self.maintenance_mode_dir = tempfile.mkdtemp(prefix='maintenance-')
    self.addCleanup(self.cleanup)
    self.file_server.start()

    # workaround for setting the rest service log path, since it's
    # needed when 'server' module is imported.
    # right after the import the log path is set normally like the rest
    # of the variables (used in the reset_state)
    fd, self.tmp_conf_file = tempfile.mkstemp(prefix='conf-file-')
    os.close(fd)
    with open(self.tmp_conf_file, 'w') as f:
        json.dump({'rest_service_log_path': self.rest_service_log,
                   'rest_service_log_file_size_MB': 1,
                   'rest_service_log_files_backup_count': 1,
                   'rest_service_log_level': 'DEBUG'},
                  f)
    # The env var must only exist for the duration of the import.
    os.environ['MANAGER_REST_CONFIG_PATH'] = self.tmp_conf_file
    try:
        from manager_rest import server
    finally:
        del(os.environ['MANAGER_REST_CONFIG_PATH'])

    self.server_configuration = self.create_configuration()
    server.SQL_DIALECT = 'sqlite'
    server.reset_app(self.server_configuration)
    utils.copy_resources(config.instance.file_server_root)

    # Keep a request context pushed for the whole test so flask globals
    # (current_app, request, g) are available.
    self._flask_app_context = server.app.test_request_context()
    self._flask_app_context.push()
    self.addCleanup(self._flask_app_context.pop)
    self.app = self._get_app(server.app)
    self.client = self.create_client()
    server.db.create_all()
    default_tenant = self._init_default_tenant(server.db, server.app)
    self.sm = get_storage_manager()
    self._add_users_and_roles(server.user_datastore, default_tenant)
    self.initialize_provider_context()
def update_provider_context(args):
    """Point all managers, brokers and DB nodes at the new manager IP."""
    with setup_flask_app().app_context():
        sm = get_storage_manager()
        new_ip = args.manager_ip
        for manager in sm.list(models.Manager):
            manager.private_ip = new_ip
            manager.public_ip = new_ip
            manager.networks['default'] = new_ip
            # flag_modified marks the mutated JSON column as dirty so
            # the change is actually persisted.
            flag_modified(manager, 'networks')
            sm.update(manager)
        for broker in sm.list(models.RabbitMQBroker):
            broker.host = new_ip
            broker.networks['default'] = new_ip
            flag_modified(broker, 'networks')
            sm.update(broker)
        for db in sm.list(models.DBNodes):
            db.host = new_ip
            sm.update(db)
def _handle_default_db_config():
    """Run DB migrations and create the default tenant/admin for tests."""
    Migrate(app=server.app, db=server.db)
    try:
        upgrade(directory=MIGRATION_DIR)
    except sqlalchemy.exc.OperationalError:
        logger = logging.getLogger()
        logger.error("Could not connect to the database - is a "
                     "postgresql server running on localhost?")
        logger.error("HINT: Create a docker container running postgresql "
                     "by doing `docker run --name cloudify-db-unit-test "
                     "-e POSTGRES_PASSWORD=cloudify -e POSTGRES_USER="
                     "******"
                     "cloudify -e POSTGRES_DB=cloudify_db -p 5432:5432 "
                     "-d postgres`")
        raise
    admin_user = get_admin_user()

    # The authorization config is consumed from a file path, so dump the
    # in-memory dict to a temp file for the duration of the call.
    fd, temp_auth_file = tempfile.mkstemp()
    os.close(fd)
    with open(temp_auth_file, 'w') as f:
        yaml.dump(auth_dict, f)

    try:
        # We're mocking the AMQPManager, we aren't really using Rabbit here
        default_tenant = create_default_user_tenant_and_roles(
            admin_username=admin_user['username'],
            admin_password=admin_user['password'],
            amqp_manager=MagicMock(),
            authorization_file_path=temp_auth_file)
        default_tenant.rabbitmq_username = \
            'rabbitmq_username_default_tenant'
        default_tenant.rabbitmq_vhost = \
            'rabbitmq_vhost_defualt_tenant'
        # NOTE(review): hard-coded encrypted password fixture for tests.
        default_tenant.rabbitmq_password = \
            'gAAAAABb9p7U_Lnlmg7vyijjoxovyg215ThYi-VCTCzVYa1p-vpzi31WGko' \
            'KD_hK1mQyKgjRss_Nz-3m-cgHpZChnVT4bxZIjnOnL6sF8RtozvlRoGHtnF' \
            'G6jxqQDeEf5Heos0ia4Q5H '
        if premium_enabled:
            # License is required only when working with Cloudify Premium
            upload_mock_cloudify_license(get_storage_manager())
    finally:
        os.remove(temp_auth_file)
    utils.set_current_tenant(default_tenant)
def patch(self):
    """Replace CA certificates on running agents."""
    request_dict = get_json_and_verify_params({'bundle': {'type': bool}})
    # broker_ca_cert or manager_ca_cert can be None so no need to
    # specify their type
    broker_ca_cert = request_dict.get('broker_ca_cert')
    manager_ca_cert = request_dict.get('manager_ca_cert')
    bundle = request_dict.get('bundle')
    sm = get_storage_manager()
    num_of_updated_agents = 0
    new_broker_ca, new_manager_ca = self._get_new_ca_certs(
        sm, bundle, broker_ca_cert, manager_ca_cert)
    all_tenants = sm.list(models.Tenant, get_all_results=True)
    for tenant in all_tenants:
        tenant_agents = sm.list(models.Agent,
                                get_all_results=True,
                                all_tenants=True,
                                filters={'tenant': tenant})
        # One AMQP client per tenant; handlers are registered first and
        # all messages are published together inside the client's
        # context below.
        amqp_client = get_amqp_client(tenant=tenant)
        to_send = []
        for agent in tenant_agents:
            message = {
                'service_task': {
                    'task_name': 'replace-ca-certs',
                    'kwargs': {
                        'new_broker_ca': new_broker_ca,
                        'new_manager_ca': new_manager_ca
                    }
                }
            }
            # Each agent listens on its own direct exchange.
            handler = SendHandler(agent.rabbitmq_exchange,
                                  exchange_type='direct',
                                  routing_key='service')
            to_send.append((handler, message))
            amqp_client.add_handler(handler)
            num_of_updated_agents += 1
        with amqp_client:
            for handler, message in to_send:
                handler.publish(message)
    return {'number_of_updated_agents': num_of_updated_agents}
def get(self, multi_tenancy=None, _include=None, filters=None,
        pagination=None, sort=None, **kwargs):
    """List tenants."""
    if multi_tenancy:
        # Premium path: full multi-tenant listing.
        return multi_tenancy.list_tenants(_include, filters, pagination,
                                          sort)
    sm = get_storage_manager()
    return sm.list(models.Tenant,
                   include=_include,
                   filters=filters,
                   pagination=pagination,
                   sort=sort)