def patch(self, schedule_id, **kwargs):
    """Update scheduling parameters of an existing execution schedule."""
    args = get_args_and_verify_arguments([
        Argument('deployment_id', type=text_type, required=True)
    ])
    storage = get_storage_manager()
    schedule = storage.get(
        models.ExecutionSchedule,
        None,
        filters={'id': schedule_id, 'deployment_id': args['deployment_id']},
    )
    params = request.json
    # `since`/`until` are only touched when non-empty values were sent
    raw_since = params.get('since')
    if raw_since:
        schedule.since = parse_datetime_multiple_formats(raw_since)
    raw_until = params.get('until')
    if raw_until:
        schedule.until = parse_datetime_multiple_formats(raw_until)
    new_slip = params.get('slip')
    if new_slip is not None:
        schedule.slip = new_slip
    # boolean attributes share identical handling
    for attr in ('stop_on_fail', 'enabled'):
        raw_value = params.get(attr)
        if raw_value is not None:
            setattr(schedule, attr, verify_and_convert_bool(attr, raw_value))
    schedule.rule = compute_rule_from_scheduling_params(
        params, existing_rule=schedule.rule)
    schedule.next_occurrence = schedule.compute_next_occurrence()
    storage.update(schedule)
    return schedule, 201
def post(self, snapshot_id):
    """Restore a snapshot on this manager.

    Expects a JSON body with ``recreate_deployments_envs``; the booleans
    ``force``, ``restore_certificates``, ``no_reboot`` and the int
    ``timeout`` are optional.
    """
    _verify_no_multi_node_cluster(action="restore snapshot")
    request_dict = rest_utils.get_json_and_verify_params(
        {'recreate_deployments_envs'})
    recreate_deployments_envs = rest_utils.verify_and_convert_bool(
        'recreate_deployments_envs',
        request_dict['recreate_deployments_envs'])
    bypass_maintenance = is_bypass_maintenance_mode()
    # `force` is not in the verified-params set, so indexing it directly
    # raised KeyError (HTTP 500) when omitted; default to false instead.
    force = rest_utils.verify_and_convert_bool(
        'force', request_dict.get('force', 'false'))
    restore_certificates = rest_utils.verify_and_convert_bool(
        'restore_certificates',
        request_dict.get('restore_certificates', 'false'))
    no_reboot = rest_utils.verify_and_convert_bool(
        'no_reboot', request_dict.get('no_reboot', 'false'))
    if no_reboot and not restore_certificates:
        raise manager_exceptions.BadParametersError(
            '`no_reboot` is only relevant when `restore_certificates` is '
            'activated')
    default_timeout_sec = 300
    request_timeout = request_dict.get('timeout', default_timeout_sec)
    timeout = rest_utils.convert_to_int(request_timeout)
    execution = get_resource_manager().restore_snapshot(
        snapshot_id,
        recreate_deployments_envs,
        force,
        bypass_maintenance,
        timeout,
        restore_certificates,
        no_reboot)
    return execution, 200
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, **kwargs):
    """List executions, optionally restricted to one execution group."""
    if '_group_id' in request.args:
        # `filters` may be None when no other filters were supplied;
        # item assignment on None raised TypeError. Copy-and-extend.
        filters = dict(filters or {})
        filters['execution_groups'] = lambda col: col.any(
            models.ExecutionGroup.id == request.args['_group_id']
        )
    is_include_system_workflows = rest_utils.verify_and_convert_bool(
        '_include_system_workflows',
        request.args.get('_include_system_workflows', False))
    get_all_results = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False)
    )
    return get_resource_manager().list_executions(
        filters=filters,
        pagination=pagination,
        sort=sort,
        is_include_system_workflows=is_include_system_workflows,
        include=_include,
        all_tenants=all_tenants,
        get_all_results=get_all_results
    )
def put(self, snapshot_id):
    """Create a snapshot with the given id."""
    rest_utils.validate_inputs({'snapshot_id': snapshot_id})
    request_dict = rest_utils.get_json_and_verify_params()

    def _flag(name, default):
        # all snapshot options are boolean flags with string defaults
        return rest_utils.verify_and_convert_bool(
            name, request_dict.get(name, default))

    include_metrics = _flag('include_metrics', 'false')
    include_credentials = _flag('include_credentials', 'true')
    include_logs = _flag('include_logs', 'true')
    include_events = _flag('include_events', 'true')
    queue = _flag('queue', 'false')
    execution = get_resource_manager().create_snapshot(
        snapshot_id,
        include_metrics,
        include_credentials,
        include_logs,
        include_events,
        True,
        queue
    )
    return execution, 201
def post(self, snapshot_id):
    """Restore a snapshot (bypassing maintenance mode).

    Expects a JSON body with ``recreate_deployments_envs``; the booleans
    ``force``, ``restore_certificates``, ``no_reboot``,
    ``ignore_plugin_failure`` and the int ``timeout`` are optional.
    """
    request_dict = rest_utils.get_json_and_verify_params(
        {'recreate_deployments_envs'})
    recreate_deployments_envs = rest_utils.verify_and_convert_bool(
        'recreate_deployments_envs',
        request_dict['recreate_deployments_envs'])
    # `force` is not in the verified-params set, so indexing it directly
    # raised KeyError (HTTP 500) when omitted; default to false instead.
    force = rest_utils.verify_and_convert_bool(
        'force', request_dict.get('force', 'false'))
    restore_certificates = rest_utils.verify_and_convert_bool(
        'restore_certificates',
        request_dict.get('restore_certificates', 'false'))
    no_reboot = rest_utils.verify_and_convert_bool(
        'no_reboot', request_dict.get('no_reboot', 'false'))
    ignore_plugin_failure = rest_utils.verify_and_convert_bool(
        'ignore_plugin_failure',
        request_dict.get('ignore_plugin_failure', 'false'))
    if no_reboot and not restore_certificates:
        raise manager_exceptions.BadParametersError(
            '`no_reboot` is only relevant when `restore_certificates` is '
            'activated')
    default_timeout_sec = 300
    request_timeout = request_dict.get('timeout', default_timeout_sec)
    timeout = rest_utils.convert_to_int(request_timeout)
    execution = get_resource_manager().restore_snapshot(
        snapshot_id,
        recreate_deployments_envs,
        force,
        True,
        timeout,
        restore_certificates,
        no_reboot,
        ignore_plugin_failure)
    return execution, 200
def _get_secret_params(self, key):
    """Validate the request body and assemble the secret parameters."""
    rest_utils.validate_inputs({'key': key})
    body = rest_utils.get_json_and_verify_params({
        'value': {'type': unicode}
    })
    # both optional flags default to False and share identical handling
    flags = {
        name: rest_utils.verify_and_convert_bool(name, body.get(name, False))
        for name in ('update_if_exists', 'is_hidden_value')
    }
    requested_visibility = rest_utils.get_visibility_parameter(
        optional=True,
        valid_values=VisibilityState.STATES,
    )
    visibility = get_resource_manager().get_resource_visibility(
        models.Secret, key, requested_visibility
    )
    return {
        'value': body['value'],
        'update_if_exists': flags['update_if_exists'],
        'visibility': visibility,
        'is_hidden_value': flags['is_hidden_value']
    }
def _get_secret_params(self, key):
    """Build the secret-creation parameters from the request body."""
    rest_utils.validate_inputs({'key': key})
    body = rest_utils.get_json_and_verify_params(
        {'value': {'type': unicode}})
    update_if_exists = rest_utils.verify_and_convert_bool(
        'update_if_exists', body.get('update_if_exists', False))
    is_hidden_value = rest_utils.verify_and_convert_bool(
        'is_hidden_value', body.get('is_hidden_value', False))
    requested_visibility = rest_utils.get_visibility_parameter(
        optional=True, valid_values=VisibilityState.STATES)
    # resolve the effective visibility for this secret
    effective_visibility = get_resource_manager().get_resource_visibility(
        models.Secret, key, requested_visibility)
    return {
        'value': body['value'],
        'update_if_exists': update_if_exists,
        'visibility': effective_visibility,
        'is_hidden_value': is_hidden_value,
    }
def post(self, **kwargs):
    """Execute a workflow.

    Expects a JSON body with ``deployment_id`` and ``workflow_id``;
    optional boolean flags ``allow_custom_parameters``, ``force``,
    ``dry_run``, ``queue`` and an optional ``parameters`` dict.
    """
    request_dict = get_json_and_verify_params(
        {'deployment_id', 'workflow_id'})
    allow_custom_parameters = verify_and_convert_bool(
        'allow_custom_parameters',
        request_dict.get('allow_custom_parameters', 'false'))
    force = verify_and_convert_bool(
        'force', request_dict.get('force', 'false'))
    dry_run = verify_and_convert_bool(
        'dry_run', request_dict.get('dry_run', 'false'))
    queue = verify_and_convert_bool(
        'queue', request_dict.get('queue', 'false'))
    deployment_id = request_dict['deployment_id']
    workflow_id = request_dict['workflow_id']
    parameters = request_dict.get('parameters', None)
    # isinstance (rather than an exact-class identity check) also accepts
    # dict subclasses, matching the validation style used elsewhere here.
    if parameters is not None and not isinstance(parameters, dict):
        raise manager_exceptions.BadParametersError(
            "request body's 'parameters' field must be a dict but"
            " is of type {0}".format(parameters.__class__.__name__))
    bypass_maintenance = is_bypass_maintenance_mode()
    execution = get_resource_manager().execute_workflow(
        deployment_id, workflow_id,
        parameters=parameters,
        allow_custom_parameters=allow_custom_parameters,
        force=force,
        dry_run=dry_run,
        bypass_maintenance=bypass_maintenance,
        queue=queue)
    return execution, 201
def _validate_bool_attr(secret, attr):
    """Check that `secret[attr]` is a valid boolean value.

    Returns (True, converted_bool) on success, or (False, attr) when the
    value cannot be converted.
    """
    try:
        return True, rest_utils.verify_and_convert_bool('', secret[attr])
    except manager_exceptions.BadParametersError:
        # conversion failed - report invalid, leaving `attr` untouched
        return False, attr
def get(self):
    """Get the status of the manager services."""
    wants_summary = verify_and_convert_bool(
        'summary', request.args.get('summary', False))
    # Systemd should be available on every manager
    if not get_services:
        return {'status': ServiceStatus.FAIL, 'services': {}}
    services = {}
    systemd_statuses = self._check_systemd_services(services)
    rabbitmq_status = self._check_rabbitmq(services)
    # Passing our authentication implies PostgreSQL is healthy
    self._add_or_update_service(
        services, 'PostgreSQL', NodeServiceStatus.ACTIVE)
    if ha_utils and ha_utils.is_clustered():
        syncthing_status = self._check_syncthing(services)
    else:
        syncthing_status = NodeServiceStatus.ACTIVE
    status = self._get_manager_status(
        systemd_statuses, rabbitmq_status, syncthing_status)
    # If the response should be only the summary - mainly for LB
    if wants_summary:
        return {'status': status, 'services': {}}
    return {'status': status, 'services': services}
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List nodes, adjusting planned instance counts for group scaling."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results', request.args.get('_get_all_results', False))
    result = get_storage_manager().list(
        models.Node,
        include=_include,
        pagination=pagination,
        filters=filters,
        substr_filters=search,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
    # Update the node instance count to account for group scaling policy
    for node in result:
        if not hasattr(node, 'deployment'):
            continue
        multiplier = 1
        for group in node.deployment.scaling_groups.values():
            # the node (or its host) belongs to this scaling group
            if {node.id, node.host_id} & set(group['members']):
                multiplier *= group['properties']['planned_instances']
        node.set_actual_planned_node_instances(
            multiplier * node.planned_number_of_instances)
    return result
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, filter_id=None, **kwargs):
    """List uploaded blueprints, optionally through a stored filter."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results', request.args.get('_get_all_results', False))
    filters, _include = rest_utils.modify_blueprints_list_args(
        filters, _include)
    # resolve the stored filter (if any) into concrete filter rules
    filter_rules = get_filter_rules_from_filter_id(
        filter_id, models.BlueprintsFilter)
    return get_storage_manager().list(
        models.Blueprint,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all,
        filter_rules=filter_rules)
def post(self, snapshot_id):
    """Restore a snapshot into the given tenant.

    Expects a JSON body with ``recreate_deployments_envs`` and
    ``tenant_name``; ``force`` and ``timeout`` are optional.
    """
    request_dict = rest_utils.get_json_and_verify_params(
        {'recreate_deployments_envs', 'tenant_name'})
    recreate_deployments_envs = rest_utils.verify_and_convert_bool(
        'recreate_deployments_envs',
        request_dict['recreate_deployments_envs'])
    bypass_maintenance = is_bypass_maintenance_mode()
    # `force` is not in the verified-params set, so indexing it directly
    # raised KeyError (HTTP 500) when omitted; default to false instead.
    force = rest_utils.verify_and_convert_bool(
        'force', request_dict.get('force', 'false'))
    tenant_name = request_dict['tenant_name']
    default_timeout_sec = 300
    request_timeout = request_dict.get('timeout', default_timeout_sec)
    timeout = rest_utils.convert_to_int(request_timeout)
    execution = get_resource_manager().restore_snapshot(
        snapshot_id,
        recreate_deployments_envs,
        force,
        bypass_maintenance,
        timeout,
        tenant_name)
    return execution, 200
def put(self, snapshot_id):
    """Create a snapshot with the given id."""
    body = rest_utils.get_json_and_verify_params()

    def _flag(name, default):
        # snapshot options are boolean flags with string defaults
        return rest_utils.verify_and_convert_bool(
            name, body.get(name, default))

    include_metrics = _flag('include_metrics', 'false')
    include_credentials = _flag('include_credentials', 'true')
    include_logs = _flag('include_logs', 'true')
    include_events = _flag('include_events', 'true')
    bypass_maintenance = is_bypass_maintenance_mode()
    execution = get_resource_manager().create_snapshot(
        snapshot_id,
        include_metrics,
        include_credentials,
        include_logs,
        include_events,
        bypass_maintenance)
    return execution, 201
def post(self, snapshot_id):
    """Restore a snapshot (bypassing maintenance mode).

    Expects a JSON body with ``recreate_deployments_envs``; the booleans
    ``force``, ``restore_certificates``, ``no_reboot``,
    ``ignore_plugin_failure`` and the int ``timeout`` are optional.
    """
    request_dict = rest_utils.get_json_and_verify_params(
        {'recreate_deployments_envs'}
    )
    recreate_deployments_envs = rest_utils.verify_and_convert_bool(
        'recreate_deployments_envs',
        request_dict['recreate_deployments_envs']
    )
    # `force` is not in the verified-params set, so indexing it directly
    # raised KeyError (HTTP 500) when omitted; default to false instead.
    force = rest_utils.verify_and_convert_bool(
        'force',
        request_dict.get('force', 'false')
    )
    restore_certificates = rest_utils.verify_and_convert_bool(
        'restore_certificates',
        request_dict.get('restore_certificates', 'false')
    )
    no_reboot = rest_utils.verify_and_convert_bool(
        'no_reboot',
        request_dict.get('no_reboot', 'false')
    )
    ignore_plugin_failure = rest_utils.verify_and_convert_bool(
        'ignore_plugin_failure',
        request_dict.get('ignore_plugin_failure', 'false')
    )
    if no_reboot and not restore_certificates:
        raise manager_exceptions.BadParametersError(
            '`no_reboot` is only relevant when `restore_certificates` is '
            'activated')
    default_timeout_sec = 300
    request_timeout = request_dict.get('timeout', default_timeout_sec)
    timeout = rest_utils.convert_to_int(request_timeout)
    execution = get_resource_manager().restore_snapshot(
        snapshot_id,
        recreate_deployments_envs,
        force,
        True,
        timeout,
        restore_certificates,
        no_reboot,
        ignore_plugin_failure
    )
    return execution, 200
def _get_execution_arguments(request_dict):
    """Extract and validate the optional `execution_arguments` dict."""
    arguments = request_dict.get('execution_arguments')
    if not arguments:
        return {}
    if not isinstance(arguments, dict):
        raise manager_exceptions.BadParametersError(
            "execution_arguments: expected a dict, but got: {}"
            .format(arguments))

    def _flag(name):
        # boolean execution options all default to False
        return verify_and_convert_bool(name, arguments.get(name, False))

    return {
        'allow_custom_parameters': _flag('allow_custom_parameters'),
        'force': _flag('force'),
        'is_dry_run': _flag('dry_run'),
        'wait_after_fail': arguments.get('wait_after_fail', 600),
    }
def get(self):
    """Get the status of the entire cloudify cluster."""
    wants_summary = verify_and_convert_bool(
        'summary', request.args.get('summary', False))
    cluster_status = get_cluster_status()
    if wants_summary:
        # Only the summary is returned - mainly for LB
        return {'status': cluster_status['status'], 'services': {}}
    return cluster_status
def post(self, **kwargs):
    """Execute a workflow, optionally scheduled for a later time.

    Expects a JSON body with ``deployment_id`` and ``workflow_id``;
    optional boolean flags ``allow_custom_parameters``, ``force``,
    ``dry_run``, ``queue``, an optional ``parameters`` dict,
    ``wait_after_fail`` (seconds) and ``scheduled_time``.
    """
    request_dict = get_json_and_verify_params({'deployment_id',
                                               'workflow_id'})
    allow_custom_parameters = verify_and_convert_bool(
        'allow_custom_parameters',
        request_dict.get('allow_custom_parameters', 'false'))
    force = verify_and_convert_bool(
        'force', request_dict.get('force', 'false'))
    dry_run = verify_and_convert_bool(
        'dry_run', request_dict.get('dry_run', 'false'))
    queue = verify_and_convert_bool(
        'queue', request_dict.get('queue', 'false'))
    deployment_id = request_dict['deployment_id']
    workflow_id = request_dict['workflow_id']
    parameters = request_dict.get('parameters', None)
    wait_after_fail = request_dict.get('wait_after_fail', 600)
    scheduled_time = request_dict.get('scheduled_time', None)
    if scheduled_time:
        scheduled_time = parse_datetime(scheduled_time)
    # isinstance (rather than an exact-class identity check) also accepts
    # dict subclasses, matching the validation style used elsewhere here.
    if parameters is not None and not isinstance(parameters, dict):
        raise manager_exceptions.BadParametersError(
            "request body's 'parameters' field must be a dict but"
            " is of type {0}".format(parameters.__class__.__name__))
    bypass_maintenance = is_bypass_maintenance_mode()
    execution = get_resource_manager().execute_workflow(
        deployment_id, workflow_id,
        parameters=parameters,
        allow_custom_parameters=allow_custom_parameters,
        force=force,
        dry_run=dry_run,
        bypass_maintenance=bypass_maintenance,
        queue=queue,
        wait_after_fail=wait_after_fail,
        scheduled_time=scheduled_time)
    return execution, 201
def post(self):
    """Enable or disable SSL on the manager."""
    body = rest_utils.get_json_and_verify_params({'state'})
    enable = rest_utils.verify_and_convert_bool('state', body.get('state'))
    status = 'enabled' if enable else 'disabled'
    # no-op if the requested state is already in effect
    if enable == SSLConfig._is_enabled():
        return 'SSL is already {0} on the manager'.format(status)
    self._set_ssl_state(enable)
    return 'SSL is now {0} on the manager'.format(status)
def get(self):
    """Get the status of the entire cloudify cluster."""
    wants_summary = verify_and_convert_bool(
        'summary', request.args.get('summary', False))
    cluster_status = get_cluster_status()
    if not wants_summary:
        return cluster_status
    # Summary-only response; signal failure via the HTTP status code
    short_status = cluster_status.get(STATUS)
    status_code = 500 if short_status == ServiceStatus.FAIL else 200
    return {'status': short_status, 'services': {}}, status_code
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, **kwargs):
    """List executions."""
    include_system = rest_utils.verify_and_convert_bool(
        '_include_system_workflows',
        request.args.get('_include_system_workflows', 'false'))
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    return get_resource_manager().list_executions(
        filters=filters,
        pagination=pagination,
        sort=sort,
        is_include_system_workflows=include_system,
        include=_include,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def get(self):
    """Get the status of the manager services."""
    wants_summary = verify_and_convert_bool(
        'summary', request.args.get('summary', False))
    # no service inspection available - report overall failure
    if not get_services:
        return {'status': ServiceStatus.FAIL, 'services': {}}
    status, services = _get_status_and_services()
    # summary responses omit the per-service breakdown
    return {'status': status,
            'services': {} if wants_summary else services}
def put(self, schedule_id, **kwargs):
    """Schedule a workflow execution."""
    validate_inputs({'schedule_id': schedule_id})
    deployment_id = get_args_and_verify_arguments([
        Argument('deployment_id', type=text_type, required=True)
    ])['deployment_id']
    body = get_json_and_verify_params({'workflow_id', 'since'})
    workflow_id = body['workflow_id']
    execution_arguments = self._get_execution_arguments(body)
    parameters = body.get('parameters', None)
    if parameters is not None and not isinstance(parameters, dict):
        raise manager_exceptions.BadParametersError(
            "parameters: expected a dict, but got: {0}".format(parameters))
    rm = get_resource_manager()
    deployment = rm.sm.get(models.Deployment, deployment_id)
    # fail early if the workflow is not defined on this deployment
    rm._verify_workflow_in_deployment(workflow_id, deployment,
                                      deployment_id)
    since = body['since']
    if since:
        since = parse_datetime_multiple_formats(since)
    until = body.get('until')
    if until:
        until = parse_datetime_multiple_formats(until)
    schedule = models.ExecutionSchedule(
        id=schedule_id,
        deployment=deployment,
        created_at=get_formatted_timestamp(),
        since=since,
        until=until,
        rule=compute_rule_from_scheduling_params(body),
        slip=body.get('slip', 0),
        workflow_id=workflow_id,
        parameters=parameters,
        execution_arguments=execution_arguments,
        stop_on_fail=verify_and_convert_bool(
            'stop_on_fail', body.get('stop_on_fail', False)),
    )
    schedule.next_occurrence = schedule.compute_next_occurrence()
    return rm.sm.put(schedule), 201
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None):
    """List execution groups."""
    fetch_all = verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        models.ExecutionGroup,
        include=_include,
        filters=filters,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List node instances."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        models.NodeInstance,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None):
    """List sites."""
    fetch_all = verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        models.Site,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List uploaded blueprints."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        models.Blueprint,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def list_resource_filters(filters_model, _include=None, pagination=None,
                          sort=None, all_tenants=None, search=None):
    """List stored filter resources of the given filters model."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results', request.args.get('_get_all_results', False))
    return get_storage_manager().list(
        filters_model,
        include=_include,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all,
    )
def post(self):
    """Enable or disable SSL on the manager (admin only)."""
    if not current_user.is_admin:
        raise_unauthorized_user_error(
            '{0} does not have privileges to set SSL mode'.format(
                current_user))
    body = rest_utils.get_json_and_verify_params({'state'})
    enable = rest_utils.verify_and_convert_bool('state', body.get('state'))
    status = 'enabled' if enable else 'disabled'
    if enable == SSLConfig._is_enabled():
        return 'SSL is already {0} on the manager'.format(status)
    # Swap the nginx config between the HTTP and HTTPS include paths.
    # NOTE(review): shell=True is used with fixed, constant paths only -
    # no user input is interpolated into the command.
    source = HTTP_PATH if enable else HTTPS_PATH
    target = HTTPS_PATH if enable else HTTP_PATH
    cmd = 'sudo sed -i "s~{0}~{1}~g" {2}'.format(source, target,
                                                 DEFAULT_CONF_PATH)
    check_call(cmd, shell=True)
    # restart nginx after this response has had a chance to be sent
    Popen('sleep 1; sudo systemctl restart nginx', shell=True)
    return 'SSL is now {0} on the manager'.format(status)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, **kwargs):
    """List executions."""
    deployment_id = request.args.get('deployment_id')
    if deployment_id:
        # fail early when the referenced deployment does not exist
        self._check_if_deployment_exists(deployment_id, all_tenants)
    include_system = rest_utils.verify_and_convert_bool(
        '_include_system_workflows',
        request.args.get('_include_system_workflows', 'false'))
    return get_resource_manager().list_executions(
        filters=filters,
        pagination=pagination,
        sort=sort,
        is_include_system_workflows=include_system,
        include=_include,
        all_tenants=all_tenants)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        **kwargs):
    """List executions."""
    deployment_id = request.args.get('deployment_id')
    if deployment_id:
        # raises not-found when the referenced deployment does not exist
        get_storage_manager().get(
            models.Deployment, deployment_id, include=['id'])
    include_system = rest_utils.verify_and_convert_bool(
        '_include_system_workflows',
        request.args.get('_include_system_workflows', 'false'))
    return get_resource_manager().list_executions(
        filters=filters,
        pagination=pagination,
        sort=sort,
        is_include_system_workflows=include_system,
        include=_include)
def get(self, _include=None, filters=None, pagination=None, sort=None,
        all_tenants=None, search=None, **kwargs):
    """List uploaded blueprints, excluding hidden ones by default."""
    fetch_all = rest_utils.verify_and_convert_bool(
        '_get_all_results',
        request.args.get('_get_all_results', False))
    filters = filters or {}
    # hidden blueprints are excluded unless explicitly requested
    filters.setdefault('is_hidden', False)
    return get_storage_manager().list(
        models.Blueprint,
        include=_include,
        filters=filters,
        substr_filters=search,
        pagination=pagination,
        sort=sort,
        all_tenants=all_tenants,
        get_all_results=fetch_all)
def wrapper(*args, **kwargs):
    """Inject the `_evaluate_functions` query flag as a keyword argument."""
    raw = request.args.get('_evaluate_functions', False)
    kwargs['evaluate_functions'] = verify_and_convert_bool(
        '_evaluate_functions', raw)
    return func(*args, **kwargs)
def _get_data():
    """Return the `_get_data` boolean flag from the query string."""
    raw = request.args.get('_get_data', False)
    # Use the real argument name so a validation failure reports
    # `_get_data` (the name mentioned 'get_data', which no caller sends).
    return verify_and_convert_bool('_get_data', raw)
def is_all_tenants(*args, **kw):
    """Inject the `_all_tenants` query flag as the `all_tenants` kwarg."""
    flag = verify_and_convert_bool(
        'all_tenants', request.args.get('_all_tenants', False))
    return func(*args, all_tenants=flag, **kw)