Example #1
File: tasks.py  Project: LazyWill/cfy3
def wait(**kwargs):

    # check args
    if 'deployment_id' not in ctx.node.properties:
        raise NonRecoverableError("deployment id not specified for proxy")
    if ('wait_for' not in ctx.node.properties or
            ctx.node.properties['wait_for'] not in ('exists', 'expr')):
        raise NonRecoverableError(
            "wait_for must be either 'exists' or 'expr'")

    client = manager.get_rest_client()
    deployment_id = ctx.node.properties['deployment_id']
    timeout = ctx.node.properties['timeout']
    endtime = time.time() + timeout

    # handle "exists": wait until the named deployment output is present
    if ctx.node.properties['wait_for'] == 'exists':
        while time.time() <= endtime:
            try:
                outputs = client.deployments.outputs.get(
                    deployment_id).outputs
                if outputs.get(ctx.node.properties['test']) is not None:
                    ctx.instance.runtime_properties['outputs'] = outputs
                    return
            except Exception:
                ctx.logger.info(
                    "caught exception {0}".format(sys.exc_info()[0]))
            time.sleep(5)

    # handle "expr": evaluate the configured Python expression against
    # the deployment outputs until it is truthy
    elif ctx.node.properties['wait_for'] == 'expr':
        while time.time() <= endtime:
            try:
                outputs = client.deployments.outputs.get(
                    deployment_id).outputs
                ctx.logger.info(
                    "evaluating {0}".format(ctx.node.properties['test']))
                if eval(ctx.node.properties['test']):
                    ctx.logger.info("evaluated as True")
                    ctx.instance.runtime_properties['outputs'] = outputs
                    return
                ctx.logger.info("evaluated as False")
            except Exception:
                ctx.logger.info(
                    "caught exception {0}".format(sys.exc_info()[0]))
            time.sleep(5)

    raise RecoverableError("timed out waiting for deployment")
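The two polling loops above duplicate the same timeout bookkeeping. A generic helper in the spirit of the poll_until_with_timeout used by later examples keeps that logic in one place (a sketch; the exact signature, and the interval argument in particular, is assumed here):

import time


def poll_until_with_timeout(pollster, expected_result=True,
                            timeout=900, interval=5):
    # Call pollster() every `interval` seconds until it returns
    # `expected_result` or `timeout` seconds have elapsed.
    endtime = time.time() + timeout
    while time.time() <= endtime:
        if pollster() == expected_result:
            return True
        time.sleep(interval)
    return False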
Example #2
 def _create_agent(self, nodes, tenant_name):
     client = None
     for node_instances in nodes.itervalues():
         for node_instance_id, agent in node_instances.iteritems():
             broker_config = self._get_broker_config(agent)
             tenant_name = tenant_name or self._get_tenant_name(
                 node_instance_id)
             client = client or get_rest_client(tenant_name)
             node_instance = client.node_instances.get(node_instance_id)
             runtime_properties = node_instance.runtime_properties
             old_agent = runtime_properties.get('cloudify_agent', {})
             self.insert_agent_to_db(old_agent, node_instance_id, client)
             if not broker_config.get('broker_ip'):
                 broker_config['broker_ip'] = \
                     old_agent.get('manager_ip', '')
             agent['broker_config'] = broker_config
             old_agent.update(agent)
             runtime_properties['cloudify_agent'] = old_agent
             # Results of agent validation on old manager.
             # Might be incorrect for new manager.
             runtime_properties.pop('agent_status', None)
             # Starting from version 4.4 the rest_tenant is not being saved
             # in the runtime properties
             if self._manager_version < V_4_4_0:
                 runtime_properties.pop('rest_tenant', None)
             client.node_instances.update(
                 node_instance_id=node_instance_id,
                 runtime_properties=runtime_properties,
                 version=node_instance.version
             )
Example #3
def get_tenants_list():
    client = manager.get_rest_client(snapshot_constants.DEFAULT_TENANT_NAME)
    version = client.manager.get_version()
    if version['edition'] != 'premium':
        return [snapshot_constants.DEFAULT_TENANT_NAME]
    tenants = client.tenants.list(_include=['name'], _get_all_results=True)
    return [tenant.name for tenant in tenants]
def get_version(instance):
    instance.execute_operation(
        "webapp_upgrade.store_version",
        kwargs={"property": "__version"}).get()

    client = manager.get_rest_client()
    rest_instance = client.node_instances.get(instance.id)
    return rest_instance.runtime_properties["__version"]
def wait_for_deployment(**kwargs):
    ctx.logger.info("Entering wait_for_deployment event.")
    if 'deployment_id' not in ctx.node.properties:
        raise exceptions.NonRecoverableError(
            "Deployment ID not specified.")

    client = manager.get_rest_client()
    timeout = ctx.node.properties['timeout']
    deployment_id = ctx.node.properties['deployment_id']

    def _check_if_deployment_is_ready():
        _execs = client.executions.list(
            deployment_id=deployment_id)
        ctx.logger.info("Deployment executions statuses: {0}.".format(
            str([[_e['workflow_id'],
                  _e['status']] for _e in _execs])
        ))
        ctx.logger.info("Are all executions were finished? {0}".format(
            [str(_e['status']) == "terminated" for _e in _execs]))
        return any([str(_e['status']) == "terminated" for _e in _execs])

    poll_until_with_timeout(
        _check_if_deployment_is_ready,
        expected_result=True,
        timeout=timeout)

    ctx.logger.info("Exiting wait_for_deployment event.")
    def __init__(self,
                 config,
                 snapshot_id,
                 recreate_deployments_envs,
                 force,
                 timeout,
                 premium_enabled,
                 user_is_bootstrap_admin,
                 restore_certificates,
                 no_reboot,
                 ignore_plugin_failure):
        self._npm = Npm()
        self._config = utils.DictToAttributes(config)
        self._snapshot_id = snapshot_id
        self._force = force
        self._timeout = timeout
        self._restore_certificates = restore_certificates
        self._no_reboot = no_reboot
        self._premium_enabled = premium_enabled
        self._user_is_bootstrap_admin = user_is_bootstrap_admin
        self._ignore_plugin_failure = ignore_plugin_failure
        self._post_restore_commands = []

        self._tempdir = None
        self._snapshot_version = None
        self._client = get_rest_client()
        self._manager_version = utils.get_manager_version(self._client)
        self._encryption_key = None
        self._semaphore = threading.Semaphore(
            self._config.snapshot_restore_threads)
Example #7
def delete(iface, resource_config, **_):
    '''Deletes AWS EC2 Keypairs'''

    params = \
        dict() if not resource_config else resource_config.copy()

    key_name = params.get(KEYNAME, iface.resource_id)

    iface.delete({KEYNAME: key_name})

    if ctx.node.properties['store_in_runtime_properties']:
        del ctx.instance.runtime_properties['create_response']

    if ctx.node.properties['create_secret']:
        try:
            client = get_rest_client()
        except KeyError:  # No pun intended.
            raise NonRecoverableError(
                'create_secret is only supported with a Cloudify Manager.')
        secret_name = ctx.node.properties.get('secret_name', key_name)
        try:
            client.secrets.delete(key=secret_name)
        except CloudifyClientError as e:
            raise NonRecoverableError(
                'Failed to delete secret: {0}.'.format(str(e)))
def create_deployment(deployment_inputs=None, **kwargs):
    ctx.logger.info("Entering create_deployment event.")
    client = manager.get_rest_client()
    blueprint_id = ctx.node.properties['blueprint_id']
    ctx.logger.info("Blueprint ID: %s" % blueprint_id)
    deployment_id = "{0}-{1}".format(blueprint_id,
                                     str(uuid.uuid4()))
    use_existing_deployment = ctx.node.properties['use_existing_deployment']
    existing_deployment_id = ctx.node.properties['existing_deployment_id']
    try:
        if not use_existing_deployment:
            ctx.logger.info("deployment ID to create: %s" % deployment_id)
            deployment = client.deployments.create(
                blueprint_id,
                deployment_id,
                inputs=deployment_inputs)
            ctx.logger.info("Deployment object {0}."
                            .format(str(deployment)))
        else:
            client.deployments.get(existing_deployment_id)
            deployment_id = existing_deployment_id
        ctx.logger.info("Instance runtime properties %s"
                        % str(ctx.instance.runtime_properties))
        proxy_common.poll_until_with_timeout(
            proxy_common.check_if_deployment_is_ready(
                client, deployment_id),
            expected_result=True,
            timeout=900)
        ctx.instance.runtime_properties.update(
            {'deployment_id': deployment_id})
    except Exception as ex:
        ctx.logger.error(str(ex))
        raise exceptions.NonRecoverableError(str(ex))

    ctx.logger.info("Exiting create_validation event.")
def _dump_agents(tempdir):
    ctx.send_event('Preparing agents data')
    client = get_rest_client()
    broker_config = BootstrapContext(ctx.bootstrap_context).broker_config()
    defaults = {
        'version': str(_get_manager_version(client)),
        'broker_config': broker_config
    }
    result = {}
    for deployment in client.deployments.list():
        deployment_result = {}
        for node in client.nodes.list(deployment_id=deployment.id):
            if _is_compute(node):
                node_result = {}
                for node_instance in client.node_instances.list(
                        deployment_id=deployment.id,
                        node_name=node.id):
                    overrides = {}
                    current = node_instance.runtime_properties.get(
                        'cloudify_agent', {})
                    for k, v in defaults.iteritems():
                        overrides[k] = current.get(k, v)
                    node_result[node_instance.id] = overrides
                deployment_result[node.id] = node_result
        result[deployment.id] = deployment_result
    with open(os.path.join(tempdir, _AGENTS_FILE), 'w') as out:
        out.write(json.dumps(result))
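The nested loops above build a deployment -> node -> node-instance mapping, so the dumped agents file has roughly this shape (all names and values below are illustrative, not from the source):

{
    "nodecellar": {
        "host": {
            "host_2dcaf": {
                "version": "3.4.0",
                "broker_config": {"broker_ip": "10.0.0.5"}
            }
        }
    }
}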
    def __init__(self, ctx):
        self._context = ctx or {}

        self._local_task_thread_pool_size = ctx.get(
            'local_task_thread_pool_size',
            DEFAULT_LOCAL_TASK_THREAD_POOL_SIZE)
        self._task_retry_interval = ctx.get('task_retry_interval',
                                            DEFAULT_RETRY_INTERVAL)
        self._task_retries = ctx.get('task_retries',
                                     DEFAULT_TOTAL_RETRIES)
        self._logger = None

        self.blueprint = context.BlueprintContext(self._context)
        self.deployment = WorkflowDeploymentContext(self._context, self)

        if self.local:
            storage = ctx.pop('storage')
            raw_nodes = storage.get_nodes()
            raw_node_instances = storage.get_node_instances()
            handler = LocalCloudifyWorkflowContextHandler(self, storage)
        else:
            rest = get_rest_client()
            raw_nodes = rest.nodes.list(self.deployment.id)
            raw_node_instances = rest.node_instances.list(self.deployment.id)
            handler = RemoteCloudifyWorkflowContextHandler(self)

        super(CloudifyWorkflowContext, self).__init__(
            self, raw_nodes, raw_node_instances)

        self._internal = CloudifyWorkflowContextInternal(self, handler)
Example #11
def create(ctx, iface, resource_config, **_):
    '''Creates AWS EC2 Keypairs'''

    params = \
        dict() if not resource_config else resource_config.copy()

    params[KEYNAME] = utils.get_resource_name(params.get(KEYNAME))
    key_name = params[KEYNAME]

    if PUBLIC_KEY_MATERIAL in params:
        create_response = \
            iface.import_keypair(
                params,
                log_response=ctx.node.properties['log_create_response'])
    else:
        create_response = iface.create(
            params, log_response=ctx.node.properties['log_create_response'])

        # Allow the end user to store the key material in a secret.
        if ctx.node.properties['create_secret']:

            try:
                client = get_rest_client()
            except KeyError:  # No pun intended.
                raise NonRecoverableError(
                    'create_secret is only supported with a Cloudify Manager.')

            # This makes the line too long for flake8 if included in args.
            secret_name = ctx.node.properties.get('secret_name', key_name)
            secrets_count = len(client.secrets.list(key=secret_name))
            secret_value = create_response.get('KeyMaterial')

            try:
                if secrets_count == 0:
                    client.secrets.create(
                        key=secret_name,
                        value=secret_value)
                elif secrets_count == 1 and \
                        ctx.node.properties.get(
                            'update_existing_secret', False) is True:
                    client.secrets.update(
                        key=secret_name,
                        value=secret_value)
            except CloudifyClientError as e:
                raise NonRecoverableError(str(e))

    cleaned_create_response = \
        utils.JsonCleanuper(create_response).to_dict()

    # Allow the end user to opt-in to storing the key
    # material in the runtime properties.
    # Default is false
    if 'KeyMaterial' in cleaned_create_response and not \
            ctx.node.properties['store_in_runtime_properties']:
        del cleaned_create_response['KeyMaterial']
    ctx.instance.runtime_properties['create_response'] = \
        cleaned_create_response

    iface.update_resource_id(cleaned_create_response.get(KEYNAME))
    utils.update_resource_id(ctx.instance, key_name)
def inherit_deployment_attributes(deployment_id, **kwargs):
    ctx.logger.info("Entering obtain_outputs event.")
    client = manager.get_rest_client()
    outputs = ctx.node.properties["inherit_outputs"]
    ctx.logger.info("Outputs to inherit: {0}.".format(str(outputs)))
    ctx.logger.info("deployment id %s" % deployment_id)
    inherit_inputs = ctx.node.properties["inherit_inputs"]
    ctx.instance.runtime_properties.update({"inherit_outputs": outputs, "deployment_id": deployment_id})
    try:
        if inherit_inputs:
            _inputs = client.deployments.get(deployment_id)["inputs"]
            ctx.instance.runtime_properties.update({"proxy_deployment_inputs": _inputs})
        deployment_outputs = client.deployments.outputs.get(deployment_id)["outputs"]
        ctx.logger.info("Available deployment outputs {0}.".format(str(deployment_outputs)))
        ctx.logger.info("Available runtime properties: {0}.".format(str(ctx.instance.runtime_properties.keys())))
        for key in outputs:
            ctx.instance.runtime_properties.update({key: deployment_outputs.get(key)})
    except Exception as ex:
        ctx.logger.error(
            "Caught exception while obtaining deployment "
            "outputs {0} {1}".format(sys.exc_info()[0], str(ex)))
        raise exceptions.NonRecoverableError(
            "Caught exception while obtaining deployment "
            "outputs {0} {1}. Available runtime properties {2}".format(
                sys.exc_info()[0], str(ex),
                str(ctx.instance.runtime_properties.keys())))
    ctx.logger.info("Exiting obtain_outputs event.")
def delete_deployment(**kwargs):
    ctx.logger.info("Entering delete_deployment event.")

    if 'deployment_id' not in ctx.instance.runtime_properties:
        raise exceptions.NonRecoverableError(
            "Deployment ID as runtime property not specified.")

    client = manager.get_rest_client()
    deployment_id = ctx.instance.runtime_properties[
        'deployment_id']
    ignore = ctx.node.properties['ignore_live_nodes_on_delete']
    try:
        proxy_common.poll_until_with_timeout(
            proxy_common.check_if_deployment_is_ready(
                client, deployment_id),
            expected_result=True,
            timeout=900)
        client.deployments.delete(deployment_id,
                                  ignore_live_nodes=ignore)
    except Exception as ex:
        ctx.logger.error("Error during deployment deletion {0}. "
                         "Reason: {1}."
                         .format(deployment_id, str(ex)))
        raise exceptions.NonRecoverableError(
            "Error during deployment uninstall {0}. "
            "Reason: {1}.".format(deployment_id, str(ex)))
    ctx.logger.info("Exiting delete_deployment event.")
Example #14
def saving_operation_info(ctx, op, main_node, second_node=None, **_):
    with update_storage(ctx) as data:
        invocations = data['mock_operation_invocation'] = data.get(
            'mock_operation_invocation', []
        )
        num = data.get('num', 0) + 1
        data['num'] = num

        op_info = {'operation': op, 'num': num}
        if second_node is None:
            op_info.update({
                'node': main_node.node.name,
                'id': main_node.instance.id,
                'target_ids': [r.target.instance.id
                               for r in main_node.instance.relationships]
            })
        else:
            op_info.update({
                'id': main_node.instance.id,
                'source': main_node.node.name,
                'target': second_node.node.name
            })
        invocations.append(op_info)

    client = get_rest_client()
    fail_input = client.deployments.get(ctx.deployment.id).inputs.get(
        'fail', [])
    fail_input = [i for i in fail_input if
                  i.get('workflow') == ctx.workflow_id and
                  i.get('node') == main_node.node.id and
                  i.get('operation') == ctx.operation.name]
    if fail_input:
        raise RuntimeError('TEST_EXPECTED_FAIL')
    def _restore_plugins(self, existing_plugins):
        """Install any plugins that weren't installed prior to the restore

        :param existing_plugins: Names of already installed plugins
        """
        ctx.logger.info('Restoring plugins')
        plugins_to_install = self._get_plugins_to_install(existing_plugins)
        failed_plugins = []
        for tenant, plugins in plugins_to_install.items():
            client = get_rest_client(tenant=tenant)
            plugins_tmp = tempfile.mkdtemp()
            try:
                for plugin in plugins:
                    try:
                        self._restore_plugin(client,
                                             tenant,
                                             plugin,
                                             plugins_tmp)
                    except Exception as ex:
                        if self._ignore_plugin_failure:
                            ctx.logger.warning(
                                'Failed to restore plugin: {0}, '
                                'ignore-plugin-failure flag '
                                'used. Proceeding...'.format(plugin))
                            ctx.logger.debug('Restore plugin failure error: '
                                             '{0}'.format(ex))
                            failed_plugins.append(plugin)
                        else:
                            raise ex
                self._wait_for_plugin_executions(client)
                SnapshotRestore.__remove_failed_plugins_footprints(
                    client, failed_plugins)
            finally:
                os.rmdir(plugins_tmp)
        SnapshotRestore.__log_message_for_plugin_restore(failed_plugins)
 def manager_removed(self):
     logger.info('A manager has been removed from the cluster, updating '
                 'Syncthing')
     syncthing_utils.mgmtworker_update_devices(
         rest_client=get_rest_client(
             tenant='default_tenant',
             api_token=get_admin_api_token()
         ))
def create(snapshot_id, config, **kwargs):
    update_status = get_rest_client().snapshots.update_status
    config = _DictToAttributes(config)
    try:
        _create(snapshot_id, config, **kwargs)
        update_status(snapshot_id, config.created_status)
    except BaseException, e:
        update_status(snapshot_id, config.failed_status, str(e))
        raise
    def evaluate_functions(self, payload):
        client = manager.get_rest_client()

        def evaluate_functions_method(deployment_id, context, payload):
            return client.evaluate.functions(deployment_id,
                                             context,
                                             payload)['payload']
        return self._evaluate_functions_impl(payload,
                                             evaluate_functions_method)
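The evaluate.functions endpoint resolves intrinsic functions embedded anywhere in the payload. A hypothetical round trip (the node name and attribute are made up):

payload = {'ip': {'get_attribute': ['web_server', 'ip_address']}}
# self.evaluate_functions(payload) would return the payload with the
# intrinsic function resolved, e.g. something like {'ip': '10.0.0.7'}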
Example #19
    def __init__(self, operation_inputs):
        """
        Sets the properties that all operations need.
        :param operation_inputs: The inputs from the operation.
        """

        full_operation_name = ctx.operation.name
        self.operation_name = full_operation_name.split('.').pop()

        # Cloudify client setup
        self.client_config = self._get_desired_operation_input(
            'client', operation_inputs)

        if self.client_config:
            self.client = CloudifyClient(**self.client_config)
        else:
            self.client = manager.get_rest_client()

        self.plugins = self._get_desired_operation_input(
            'plugins', operation_inputs)
        self.secrets = self._get_desired_operation_input(
            'secrets', operation_inputs)
        self.config = self._get_desired_operation_input(
            'resource_config', operation_inputs)

        # Blueprint-related properties
        self.blueprint = self.config.get('blueprint', {})
        self.blueprint_id = self.blueprint.get('id') or ctx.instance.id
        self.blueprint_file_name = self.blueprint.get('main_file_name')
        self.blueprint_archive = self.blueprint.get('blueprint_archive')

        # Deployment-related properties
        runtime_deployment_prop = ctx.instance.runtime_properties.get(
            'deployment', {})
        runtime_deployment_id = runtime_deployment_prop.get('id')

        self.deployment = self.config.get('deployment', {})
        self.deployment_id = (runtime_deployment_id or
                              self.deployment.get('id') or
                              ctx.instance.id)
        self.deployment_inputs = self.deployment.get('inputs', {})
        self.deployment_logs = self.deployment.get('logs', {})
        self.deployment_auto_suffix = self.deployment.get('auto_inc_suffix',
                                                          False)

        # Execution-related properties
        self.workflow_id = operation_inputs.get(
            'workflow_id',
            'create_deployment_environment')
        self.workflow_state = operation_inputs.get('workflow_state',
                                                   'terminated')

        # Polling-related properties
        self.interval = operation_inputs.get('interval', POLLING_INTERVAL)
        self.state = operation_inputs.get('state', 'terminated')
        self.timeout = operation_inputs.get('timeout', EXECUTIONS_TIMEOUT)
Example #20
def restore(snapshot_id, recreate_deployments_envs, config, force, timeout,
            **kwargs):

    ctx.logger.info('Restoring snapshot {0}'.format(snapshot_id))

    config = _DictToAttributes(config)

    _assert_clean_elasticsearch(log_warning=force)

    tempdir = tempfile.mkdtemp('-snapshot-data')

    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(
            file_server_root,
            config.file_server_snapshots_folder
        )

        snapshot_path = os.path.join(snapshots_dir, snapshot_id, '{0}.zip'
                                     .format(snapshot_id))

        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)

        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)

        client = get_rest_client()

        manager_version = _get_manager_version(client)
        from_version = ManagerVersion(metadata[_M_VERSION])

        ctx.logger.info('Manager version = {0}, snapshot version = {1}'.format(
            str(manager_version), str(from_version)))

        if from_version.greater_than(manager_version):
            raise NonRecoverableError(
                'Cannot restore a newer manager\'s snapshot on this manager '
                '[{0} > {1}]'.format(str(from_version), str(manager_version)))

        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.logger.info('Starting to restore snapshot of manager {0}'
                        .format(from_version))

        new_plugins = _restore_snapshot(config, tempdir, metadata, timeout)

        install_plugins(new_plugins)

        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)

        ctx.logger.info('Successfully restored snapshot of manager {0}'
                        .format(from_version))
    finally:
        shutil.rmtree(tempdir)
Example #21
def process_subs(s):

    with open("/tmp/subs", "a+") as f:
        f.write("processing " + s)

    pat = '@{([^}]+)}|%{([^}]+)}'
    client = None
    m = re.search(pat, s)

    with open("/tmp/subs", "a+") as f:
        f.write(" m " + str(m) + "\n")

    if not m:
        # no patterns found
        ctx.logger.info('no pattern found: {}'.format(s))
        return s

    while m:

        # Match @ syntax: looks up runtime properties of another node.
        if m.group(1):
            with open("/tmp/subs", "a+") as f:
                f.write(" m.group(1)=" + str(m.group(1)) + "\n")
            fields = m.group(1).split(',')
            if len(fields) > 1:
                # do the substitution
                if not client:
                    client = manager.get_rest_client()
                instances = client.node_instances.list(
                    deployment_id=ctx.deployment.id, node_name=fields[0])
                if instances:
                    # just use the first instance if there is more than one
                    val = instances[0].runtime_properties
                    for field in fields[1:]:
                        field = field.strip()
                        val = val[field]    # handle nested maps

                    s = s[:m.start()] + str(val) + s[m.end(1) + 1:]
                    m = re.search(pat, s)
                else:
                    raise Exception(
                        "no instances found for node: {}".format(fields[0]))
            else:
                raise Exception("invalid pattern: " + s)

        # Match % syntax: gets a context property.
        # Also handles the special token "management_ip".
        elif m.group(2):
            with open("/tmp/subs", "a+") as f:
                f.write("m.group(2)=" + str(m.group(2)) + "\n")
            if m.group(2) == "management_ip":
                s = s[:m.start()] + str(utils.get_manager_ip()) + \
                    s[m.end(2) + 1:]
            else:
                s = s[:m.start()] + str(eval("ctx." + m.group(2))) + \
                    s[m.end(2) + 1:]
            m = re.search(pat, s)

    return s
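To illustrate the two token syntaxes handled above, a hypothetical call (the node name db and its runtime property port are made up):

s = process_subs("host=%{management_ip} port=@{db,port}")
# '@{db,port}' expands to the 'port' runtime property of the first
# instance of node 'db'; '%{management_ip}' expands to the manager IP.
# Result, with illustrative values: "host=10.10.1.10 port=5432"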
Example #22
        def wrapper(*args, **kwargs):
            ctx = _find_context_arg(args, kwargs,
                                    _is_cloudify_workflow_context)
            if ctx is None:
                ctx = {}
            if not _is_cloudify_workflow_context(ctx):
                ctx = CloudifyWorkflowContext(ctx)
                kwargs['ctx'] = ctx

            rest = get_rest_client()
            parent_conn, child_conn = Pipe()
            try:
                if rest.executions.get(ctx.execution_id).status in \
                        (Execution.CANCELLING, Execution.FORCE_CANCELLING):
                    # execution has been requested to be cancelled before it
                    # was even started
                    update_execution_cancelled(ctx)
                    return api.EXECUTION_CANCELLED_RESULT

                update_execution_status(ctx.execution_id, Execution.STARTED)
                send_workflow_event(ctx,
                                    event_type='workflow_started',
                                    message="Starting '{}' workflow execution"
                                            .format(ctx.workflow_id))

                # the actual execution of the workflow will run in another
                # process - this wrapper is the entry point for that
                # process, and takes care of forwarding the result or error
                # back to the parent process
                def child_wrapper():
                    try:
                        start_event_monitor(ctx)
                        current_workflow_ctx.set(ctx, kwargs)
                        result = func(*args, **kwargs)
                        if not ctx.internal.graph_mode:
                            tasks = list(ctx.internal.task_graph.tasks_iter())
                            for task in tasks:
                                task.async_result.get()
                        child_conn.send({'result': result})
                    except api.ExecutionCancelled:
                        child_conn.send({
                            'result': api.EXECUTION_CANCELLED_RESULT})
                    except BaseException, e:
                        tb = StringIO()
                        traceback.print_exc(file=tb)
                        err = {
                            'type': type(e).__name__,
                            'message': str(e),
                            'traceback': tb.getvalue()
                        }
                        child_conn.send({'error': err})
                    finally:
                        current_workflow_ctx.clear()
                        child_conn.close()
Example #23
def get_managed_plugin(plugin, logger=None):
    package_name = plugin.get('package_name')
    package_version = plugin.get('package_version')
    distribution = plugin.get('distribution')
    distribution_version = plugin.get('distribution_version')
    distribution_release = plugin.get('distribution_release')
    supported_platform = plugin.get('supported_platform')

    if not (package_name and package_version):
        if package_name and logger:
            logger.warn('package_name {0} is specified but no package_version '
                        'found, skipping wagon installation.'
                        .format(package_name))
        return None

    query_parameters = {
        'package_name': package_name,
        'package_version': package_version
    }
    if distribution:
        query_parameters['distribution'] = distribution
    if distribution_version:
        query_parameters['distribution_version'] = distribution_version
    if distribution_release:
        query_parameters['distribution_release'] = distribution_release
    if supported_platform:
        query_parameters['supported_platform'] = supported_platform
    client = get_rest_client()
    plugins = client.plugins.list(**query_parameters)

    if not supported_platform:
        current_platform = wagon_utils.get_platform()
        plugins = [p for p in plugins
                   if p.supported_platform in ['any', current_platform]]
    if os.name != 'nt':
        a_dist, _, a_dist_release = platform.linux_distribution(
            full_distribution_name=False)
        a_dist, a_dist_release = a_dist.lower(), a_dist_release.lower()
        if not distribution:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution == a_dist]
        if not distribution_release:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution_release == a_dist_release]

    if not plugins:
        return None

    # we return the first one because both package name and version
    # are required fields, so any matching plugin is as good as another
    return plugins[0]
def get_additional_node_groups(node_name, deployment_id):
    """This enables users to reuse hosts in multiple groups."""
    groups = []
    try:
        client = get_rest_client()
    except KeyError:
        return groups
    deployment = client.deployments.get(deployment_id)
    for group_name, group in deployment.get('groups', {}).items():
        if node_name in group.get('members', []) and group_name:
            groups.append(group_name)
    return groups
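A hypothetical call, assuming deployment dep1 defines a group db_group whose members include node db_host:

get_additional_node_groups('db_host', 'dep1')  # -> ['db_group']
# Returns [] when no REST client is configured (the KeyError branch).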
Example #25
def _restore_elasticsearch(tempdir, es, metadata, bulk_read_timeout):

    has_cloudify_events_index = es.indices.exists(index=_EVENTS_INDEX_NAME)
    snap_has_cloudify_events_index = metadata[_M_HAS_CLOUDIFY_EVENTS]

    existing_plugins = set(p.archive_name for p in
                           get_rest_client().plugins.list().items)
    new_plugins = []

    # cloudify_events -> cloudify_events, logstash-* -> logstash-*
    def get_data_itr():
        for line in open(os.path.join(tempdir, _ELASTICSEARCH), 'r'):
            elem = json.loads(line)
            if _include_es_node(elem, existing_plugins, new_plugins):
                _update_es_node(elem)
                yield elem

    _check_conflicts(es, get_data_itr())

    # logstash-* -> cloudify_events
    def logstash_to_cloudify_events():
        for elem in get_data_itr():
            if elem['_index'].startswith('logstash-'):
                elem['_index'] = _EVENTS_INDEX_NAME
            yield elem

    def cloudify_events_to_logstash():
        d = datetime.now()
        index = 'logstash-{0}'.format(d.strftime('%Y.%m.%d'))
        for elem in get_data_itr():
            if elem['_index'] == _EVENTS_INDEX_NAME:
                elem['_index'] = index
            yield elem

    # choose iter
    if (has_cloudify_events_index and snap_has_cloudify_events_index) or\
            (not has_cloudify_events_index and
             not snap_has_cloudify_events_index):
        data_iter = get_data_itr()
    elif not snap_has_cloudify_events_index and has_cloudify_events_index:
        data_iter = logstash_to_cloudify_events()
    else:
        data_iter = cloudify_events_to_logstash()

    ctx.logger.info('Restoring ElasticSearch data '
                    '[timeout={0} sec]'.format(bulk_read_timeout))
    elasticsearch.helpers.bulk(es,
                               data_iter,
                               request_timeout=bulk_read_timeout)
    es.indices.flush()

    return new_plugins
 def start_deployment_modification(self, nodes):
     deployment_id = self.workflow_ctx.deployment.id
     client = get_rest_client()
     modification = client.deployment_modifications.start(
         deployment_id=deployment_id,
         nodes=nodes,
         context={
             'blueprint_id': self.workflow_ctx.blueprint.id,
             'deployment_id': self.workflow_ctx.deployment.id,
             'execution_id': self.workflow_ctx.execution_id,
             'workflow_id': self.workflow_ctx.workflow_id,
         })
     return Modification(self.workflow_ctx, modification)
Example #27
    def can_scheduled_execution_start(execution_id, tenant):
        """
        This method checks whether a scheduled execution can currently
        start running. If it can't, it changes the execution's status to
        QUEUED (so that it will automatically start running when possible).
        """
        api_token = get_admin_api_token()
        tenant_client = get_rest_client(tenant=tenant, api_token=api_token)
        if tenant_client.executions.should_start(execution_id):
            return True

        tenant_client.executions.update(execution_id, ExecutionState.QUEUED)
        return False
def wait_for_deployment(deployment_id, **kwargs):
    ctx.logger.info("Entering wait_for_deployment event.")
    ctx.logger.info("Using deployment %s" % deployment_id)
    if not deployment_id:
        raise exceptions.NonRecoverableError("Deployment ID not specified.")

    client = manager.get_rest_client()
    timeout = ctx.node.properties["timeout"]
    proxy_common.poll_until_with_timeout(
        proxy_common.check_if_deployment_is_ready(client, deployment_id), expected_result=True, timeout=timeout
    )

    ctx.logger.info("Exiting wait_for_deployment event.")
Example #29
def get_managed_plugin(plugin):
    package_name = plugin.get('package_name')
    package_version = plugin.get('package_version')
    distribution = plugin.get('distribution')
    distribution_version = plugin.get('distribution_version')
    distribution_release = plugin.get('distribution_release')
    supported_platform = plugin.get('supported_platform')

    if not package_name:
        return None

    query_parameters = {
        'package_name': package_name
    }
    if package_version:
        query_parameters['package_version'] = package_version
    if distribution:
        query_parameters['distribution'] = distribution
    if distribution_version:
        query_parameters['distribution_version'] = distribution_version
    if distribution_release:
        query_parameters['distribution_release'] = distribution_release
    if supported_platform:
        query_parameters['supported_platform'] = supported_platform
    client = get_rest_client()
    plugins = client.plugins.list(**query_parameters)

    if not supported_platform:
        current_platform = wagon_utils.get_platform()
        plugins = [p for p in plugins
                   if p.supported_platform in ['any', current_platform]]
    if os.name != 'nt':
        a_dist, _, a_dist_release = platform.linux_distribution(
            full_distribution_name=False)
        a_dist, a_dist_release = a_dist.lower(), a_dist_release.lower()
        if not distribution:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution == a_dist]
        if not distribution_release:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution_release == a_dist_release]

    if not plugins:
        return None

    plugin_result = max(plugins,
                        key=lambda plug: LooseVersion(plug.package_version))
    return plugin_result
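LooseVersion is used as the key because plain string comparison mis-orders version numbers; a quick illustration:

from distutils.version import LooseVersion

versions = ['1.2.5', '1.10.1', '1.9.0']
max(versions)                    # '1.9.0'  -- lexicographic, wrong
max(versions, key=LooseVersion)  # '1.10.1' -- version-aware, correct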
Example #30
 def dump(self, tempdir, manager_version):
     self._manager_version = manager_version
     result = {}
     for tenant_name in get_tenants_list():
         result[tenant_name] = {}
         tenant_client = get_rest_client(tenant_name)
         tenant_deployments = tenant_client.deployments.list(
             _include=['id'],
             _get_all_results=True
         )
         for deployment in tenant_deployments:
             result[tenant_name][deployment.id] = \
                 self._get_deployment_result(tenant_client, deployment.id)
     self._dump_result_to_file(tempdir, result)
Example #31
 def schedule(self, deployment_id):
     client = get_rest_client()
     workflow_args = {
         'deployment_id': deployment_id,
         'workflow_id': 'check_policy',
         'parameters': {
             'policy_manager_id': self.policy_manager_id,
             'policy_id': self.name,
         },
         'schedule': '{0}-0000'.format(
             self.next_execution.strftime(SCHED_FORMAT))
     }
     client.executions.start(**workflow_args)
Example #32
def _update_runtime_properties(ctx, instance_id, properties_updates):
    manager = get_rest_client()

    resulted_state = manager.node_instances.get(instance_id)
    ctx.logger.debug('State before update: {}'
                     .format(repr(resulted_state)))
    ctx.logger.info("Update node: {}".format(instance_id))
    runtime_properties = resulted_state.runtime_properties or {}
    runtime_properties.update(properties_updates)
    manager.node_instances.update(node_instance_id=instance_id,
                                  runtime_properties=runtime_properties,
                                  version=resulted_state.version + 1)
    resulted_state = manager.node_instances.get(instance_id)
    ctx.logger.debug('State after update: {}'
                     .format(repr(resulted_state)))
Example #33
def _cleanup_instances(ctx, instance_ids):
    manager = get_rest_client()

    for instance_id in instance_ids:
        resulted_state = manager.node_instances.get(instance_id)
        ctx.logger.debug('State before update: {}'
                         .format(repr(resulted_state)))
        ctx.logger.info("Cleanup node: {}".format(instance_id))
        manager.node_instances.update(node_instance_id=instance_id,
                                      runtime_properties={},
                                      state='uninitialized',
                                      version=resulted_state.version + 1)
        resulted_state = manager.node_instances.get(instance_id)
        ctx.logger.debug('State after update: {}'
                         .format(repr(resulted_state)))
Example #34
    def get_brokers(self, network='default'):
        client = manager.get_rest_client()

        brokers = client.manager.get_brokers()

        # Convert the IPs based on the network
        pctx = self.get_provider_context()
        networks = pctx['cloudify']['cloudify_agent']['networks']
        network_broker_ips = networks[network]['brokers']

        for broker_id, broker_ip in enumerate(network_broker_ips):
            brokers[broker_id]['host'] = broker_ip
            brokers[broker_id]['management_host'] = broker_ip

        return brokers
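The network lookup above assumes a provider context shaped roughly as follows (network names and IPs are illustrative):

pctx = {
    'cloudify': {
        'cloudify_agent': {
            'networks': {
                'default': {'brokers': ['10.0.0.5', '10.0.0.6']},
                'external': {'brokers': ['172.16.0.5', '172.16.0.6']},
            }
        }
    }
}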
Example #35
def _wagon_install(plugin, venv, args):
    client = get_rest_client()
    wagon_dir = tempfile.mkdtemp(prefix='{0}-'.format(plugin.id))
    wagon_path = os.path.join(wagon_dir, 'wagon.tar.gz')
    try:
        ctx.logger.debug('Downloading plugin %s from manager into %s',
                         plugin.id, wagon_path)
        client.plugins.download(plugin_id=plugin.id, output_file=wagon_path)
        ctx.logger.debug('Installing plugin %s using wagon', plugin.id)
        wagon.install(wagon_path,
                      ignore_platform=True,
                      install_args=args,
                      venv=venv)
    finally:
        ctx.logger.debug('Removing directory: %s', wagon_dir)
        shutil.rmtree(wagon_dir, ignore_errors=True)
Example #36
    def __init__(self, config, snapshot_id, recreate_deployments_envs, force,
                 timeout, premium_enabled, user_is_bootstrap_admin,
                 restore_certificates, no_reboot):
        self._npm = Npm()
        self._config = utils.DictToAttributes(config)
        self._snapshot_id = snapshot_id
        self._force = force
        self._timeout = timeout
        self._restore_certificates = restore_certificates
        self._no_reboot = no_reboot
        self._premium_enabled = premium_enabled
        self._user_is_bootstrap_admin = user_is_bootstrap_admin

        self._tempdir = None
        self._snapshot_version = None
        self._client = get_rest_client()
Example #37
    def can_scheduled_execution_start(execution_id, tenant):
        """
        This method checks if a scheduled execution can currently start. If it
        wasn't cancelled but can't currently start, it changes the execution's
        status to QUEUED (so it will automatically start when possible).
        """
        api_token = get_admin_api_token()
        tenant_client = get_rest_client(tenant=tenant, api_token=api_token)
        execution = tenant_client.executions.get(execution_id)
        if execution['status'] == ExecutionState.CANCELLED:
            return False
        if tenant_client.executions.should_start(execution_id):
            return True

        tenant_client.executions.update(execution_id, ExecutionState.QUEUED)
        return False
Example #38
 def _get_tenant_name(cls, node_instance_id):
     """
     When restoring a snapshot from versions 4.0.0/4.0.1 the tenant name is
     not defined and the only way to `guess` it is by finding the
     node_instance from the agents.json file in the DB and checking its
     tenant. Using list to scan all tenants and filter by id.
     :param node_instance_id: a node instance from the agents.json file
     :return: the tenant of the given node instance
     """
     client = get_rest_client()
     try:
         node_instance = client.node_instances.list(
             _all_tenants=True, id=node_instance_id).items[0]
         return node_instance['tenant_name']
     except CloudifyClientError:
         pass
Example #39
def _initialize_rabbitmq_user(cloudify_agent):
    client = get_rest_client()
    # Generate a rabbitmq user for the agent
    username = USERNAME_PATTERN.format(cloudify_agent['name'])
    password = utils.generate_user_password()

    try:
        # In case the agent already exists
        agent = client.agents.get(cloudify_agent['name'])
        cloudify_agent['broker_user'] = agent.rabbitmq_username
        cloudify_agent['broker_pass'] = agent.rabbitmq_password
    except CloudifyClientError as e:
        if e.status_code != 404:
            raise
        cloudify_agent['broker_user'] = username
        cloudify_agent['broker_pass'] = password
Example #40
    def _restore_plugins(self, existing_plugins):
        """Install any plugins that weren't installed prior to the restore

        :param existing_plugins: Names of already installed plugins
        """
        ctx.logger.info('Restoring plugins')
        plugins_to_install = self._get_plugins_to_install(existing_plugins)
        for tenant, plugins in plugins_to_install.items():
            client = get_rest_client(tenant=tenant)
            plugins_tmp = tempfile.mkdtemp()
            try:
                for plugin in plugins:
                    self._restore_plugin(client, tenant, plugin, plugins_tmp)
            finally:
                os.rmdir(plugins_tmp)
        ctx.logger.info('Successfully restored plugins')
Example #41
 def _get_deployment_blueprint(deployment_id):
     new_blueprint = ""
     try:
         # get the latest deployment update to get the new blueprint id
         client = get_rest_client()
         dep_upd = \
             client.deployment_updates.list(deployment_id=deployment_id,
                                            sort='created_at')[-1]
         new_blueprint = \
             client.deployment_updates.get(dep_upd.id)[
                 "new_blueprint_id"]
     except KeyError:
         raise NonRecoverableError(
             "can't get blueprint for deployment {0}".format(
                 deployment_id))
     return new_blueprint
Example #42
def update(params, configuration_node_type, node_types_to_update, **kwargs):
    ctx = workflow_ctx
    ctx.logger.info("Starting Update Workflow")

    restcli = manager.get_rest_client()

    node_types = set(node_types_to_update)
    # update interface on the config node
    graph = ctx.graph_mode()

    sequence = graph.sequence()
    for node in ctx.nodes:
        if configuration_node_type in node.type_hierarchy:
            for instance in node.instances:
                load_config_task = instance.execute_operation(
                    'cloudify.interfaces.lifecycle.configure',
                    allow_kwargs_override=True,
                    kwargs={'parameters': params})
                sequence.add(load_config_task)

    for node in ctx.nodes:
        if node_types.intersection(set(node.type_hierarchy)):
            for instance in node.instances:
                for relationship in instance.relationships:
                    operation_task = relationship.execute_target_operation(
                        'cloudify.interfaces.relationship_lifecycle'
                        '.preconfigure')
                    sequence.add(operation_task)

    graph.execute()

    sequence = graph.sequence()

    for node in ctx.nodes:
        if node_types.intersection(set(node.type_hierarchy)):
            for instance in node.instances:
                currentinstance = restcli.node_instances.get(instance.id)
                params = currentinstance.runtime_properties['params']
                if len(params['diff_params']) > 0:
                    ctx.logger.info(
                        "Updating instance ID: {} with diff_params {}".format(
                            instance.id, params['diff_params']))
                    operation_task = instance.execute_operation(
                        'cloudify.interfaces.lifecycle.update')
                    sequence.add(operation_task)

    return graph.execute()
Example #43
def generate_token_and_port():

    # Command to get the service account name
    service_account_cmd_1 =\
        "kubectl -n kube-system get secret" \
        " | grep admin-user | awk \'{print $1}\'"

    # Execute the command and fetch the service account name
    output = fabric_api.run(service_account_cmd_1)

    # Fail if the command returned no output
    if not output:
        raise NonRecoverableError('Failed to get the service account')

    # Command to get the associated bearer token with service account
    service_account_cmd_2 =\
        "kubectl -n kube-system describe secret {0}" \
        " | grep -E '^token' | cut -f2 -d':' | tr -d '\t'".format(
            output.strip())

    # Execute the command and fetch the token associated with account
    bearer_token = fabric_api.run(service_account_cmd_2)

    # Fail if the command returned no output
    if not bearer_token:
        raise NonRecoverableError('Failed to get the bearer token')

    # Command to get the exposed port on which kubernetes ui is running
    exposed_port_cmd =\
        "kubectl -n kube-system get service kubernetes-dashboard" \
        " | awk 'FNR == 2 {print $5}' | cut -f2 -d':' | cut -f1 -d '/'"

    # Execute the command and get the output
    port = fabric_api.run(exposed_port_cmd)

    # Fail if the command returned no output
    if not port:
        raise NonRecoverableError(
            'Failed to get the kubernetes dashboard port')

    # Store the generated token as a runtime property of the instance
    token = bearer_token.strip()
    client = get_rest_client()
    client.secrets.create('kubernetes_token', token, update_if_exists=True)
    ctx.instance.runtime_properties['bearer_token'] = token
    # Store the exposed port as a runtime property of the instance
    ctx.instance.runtime_properties['dashboard_port'] = port.strip()
Example #44
def get_managed_plugin(plugin):
    package_name = plugin.get('package_name')
    package_version = plugin.get('package_version')
    distribution = plugin.get('distribution')
    distribution_version = plugin.get('distribution_version')
    distribution_release = plugin.get('distribution_release')
    supported_platform = plugin.get('supported_platform')
    if not package_name:
        return None
    query_parameters = {'package_name': package_name}
    if package_version:
        query_parameters['package_version'] = package_version
    if distribution:
        query_parameters['distribution'] = distribution
    if distribution_version:
        query_parameters['distribution_version'] = distribution_version
    if distribution_release:
        query_parameters['distribution_release'] = distribution_release
    if supported_platform:
        query_parameters['supported_platform'] = supported_platform
    client = get_rest_client()
    plugins = client.plugins.list(**query_parameters)

    (current_platform,
     a_dist,
     a_dist_release) = _extract_platform_and_distro_info()

    if not supported_platform:
        plugins = [p for p in plugins
                   if p.supported_platform in ['any', current_platform]]
    if os.name != 'nt':
        if not distribution:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution == a_dist]
        if not distribution_release:
            plugins = [p for p in plugins
                       if p.supported_platform == 'any' or
                       p.distribution_release == a_dist_release]

    if not plugins:
        return None

    # in case version was not specified, return the latest
    plugins.sort(key=lambda plugin: LooseVersion(plugin['package_version']),
                 reverse=True)
    return plugins[0]
Example #45
def get_managed_plugin(plugin):
    package_name = plugin.get('package_name')
    package_version = plugin.get('package_version')
    if not package_name:
        return None
    query_parameters = {'package_name': package_name}
    if package_version:
        query_parameters['package_version'] = package_version
    client = get_rest_client()
    plugins = client.plugins.list(**query_parameters)

    supported_plugins = [p for p in plugins if _is_plugin_supported(p)]
    if not supported_plugins:
        return None

    return max(supported_plugins,
               key=lambda plugin: parse_version(plugin['package_version']))
Example #46
    def _get_plugin(self, tenant_name, implementation):
        package_name = implementation.split('.')[0]
        filter_plugin = {'package_name': package_name}
        admin_api_token = get_admin_api_token()
        rest_client = get_rest_client(tenant=tenant_name,
                                      api_token=admin_api_token)
        plugins = rest_client.plugins.list(**filter_plugin)
        if not plugins:
            return {}

        plugins.sort(key=lambda p: StrictVersion(p.package_version),
                     reverse=True)
        return {
            'package_name': package_name,
            'package_version': plugins[0]['package_version'],
            'visibility': plugins[0]['visibility']
        }
Example #47
def composer_db_schema_get_current_revision():
    """Get composer database schema revision.
    :returns: Current revision
    :rtype: str
    """
    client = manager.get_rest_client()
    version = client.manager.get_version()
    if version['edition'] != 'premium':
        return None
    output = subprocess.check_output([
        'sudo', '-u', snapshot_constants.COMPOSER_USER, '/usr/bin/npm', 'run',
        '--silent', '--prefix',
        os.path.join(snapshot_constants.COMPOSER_BASE_FOLDER,
                     'backend'), 'db-migrate-current'
    ]).decode('utf-8')
    revision = output.strip()
    return revision
Example #48
def execute_workflow(deployment_id, workflow_id):
    ctx.logger.info("Entering execute_workflow event.")
    try:
        client = manager.get_rest_client()
        client.executions.start(deployment_id, workflow_id)
        ctx.logger.info("Workflow {0} started.".format(workflow_id))
        poll_until_with_timeout(check_if_deployment_is_ready(
            client, deployment_id),
                                expected_result=True,
                                timeout=900)
    except Exception as ex:
        ctx.logger.error("Error during deployment uninstall {0}. "
                         "Reason: {1}.".format(deployment_id, str(ex)))
        raise exceptions.NonRecoverableError(
            "Error during deployment uninstall {0}. "
            "Reason: {1}.".format(deployment_id, str(ex)))
    ctx.logger.info("Exiting execute_workflow event.")
def create_validation(**kwargs):
    ctx.logger.info("Entering create_validation event.")
    client = manager.get_rest_client()
    deployment_id = ctx.node.properties['deployment_id']
    if not deployment_id or deployment_id == '':
        ctx.logger.error("Malformed deployment ID.")
        raise exceptions.NonRecoverableError("Deployment ID is not specified.")
    try:
        client.deployments.get(deployment_id)
        ctx.logger.info("Success, deployment exists.")
    except Exception as ex:
        ctx.logger.error("Error during obtaining deployment {0}. "
                         "Reason: {1}.".format(deployment_id, str(ex)))
        raise exceptions.NonRecoverableError(
            "Error during obtaining deployment {0}. "
            "Reason: {1}.".format(deployment_id, str(ex)))
    ctx.logger.info("Exiting create_validation event.")
Example #50
    def _restore_deployment_envs(self, postgres):
        deps = utils.get_dep_contexts(self._snapshot_version)
        token_info = postgres.get_deployment_creator_ids_and_tokens()
        failed_deployments = []
        for tenant, deployments in deps:
            ctx.logger.info(
                'Restoring deployment environments for {tenant}'.format(
                    tenant=tenant))
            tenant_client = get_rest_client(tenant=tenant)
            for deployment_id, dep_ctx in deployments.iteritems():
                try:
                    ctx.logger.info('Restoring deployment {dep_id}'.format(
                        dep_id=deployment_id))
                    api_token = self._get_api_token(
                        token_info[tenant][deployment_id])
                    with dep_ctx:
                        dep = tenant_client.deployments.get(deployment_id)
                        blueprint = tenant_client.blueprints.get(
                            dep_ctx.blueprint.id)
                        tasks_graph = self._get_tasks_graph(
                            dep_ctx,
                            blueprint,
                            dep,
                            api_token,
                        )
                        tasks_graph.execute()
                        ctx.logger.info(
                            'Successfully created deployment environment '
                            'for deployment {deployment}'.format(
                                deployment=deployment_id))
                except RuntimeError as re:
                    if self.__should_ignore_deployment_failure(re.message):
                        ctx.logger.warning(
                            'Failed to create deployment {0}; the '
                            'ignore_plugin_installation_failure flag was '
                            'used, proceeding...'.format(deployment_id))
                        ctx.logger.debug(
                            'Deployment creation error: {0}'.format(re))
                        failed_deployments.append(deployment_id)
                    else:
                        raise
            SnapshotRestore.__remove_failed_deployments_footprints(
                tenant_client, failed_deployments)
            SnapshotRestore.__log_message_for_deployment_restore(
                deployments, failed_deployments, tenant)
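The __should_ignore_deployment_failure predicate is outside this excerpt; a hypothetical sketch, assuming the restore was started with an ignore_plugin_installation_failure flag and that plugin-installation failures are recognizable from the error text (both assumptions):

    def __should_ignore_deployment_failure(self, message):
        # Swallow the failure only when the operator explicitly opted in
        # and the error looks like a plugin-installation problem.
        if not self._ignore_plugin_installation_failure:  # hypothetical attribute
            return False
        return 'plugin' in (message or '').lower()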
Example #51
    def _get_agent_settings(self,
                            node_instance_id,
                            deployment_id,
                            tenant=None):
        """Get the cloudify_agent dict and the tenant dict of the agent.

        This returns cloudify_agent of the actual agent, possibly available
        via deployment proxying.
        """
        client = get_rest_client(tenant)
        node_instance = client.node_instances.get(node_instance_id)
        host_id = node_instance.host_id
        if host_id == node_instance_id:
            host_node_instance = node_instance
        else:
            host_node_instance = client.node_instances.get(host_id)
        cloudify_agent = host_node_instance.runtime_properties.get(
            'cloudify_agent', {})

        # we found the actual agent, just return it
        if cloudify_agent.get('queue') and cloudify_agent.get('name'):
            return cloudify_agent, self._get_tenant_dict(tenant, client)

        # this node instance isn't the real agent, check if it proxies to one
        node = client.nodes.get(deployment_id, host_node_instance.node_id)
        try:
            remote = node.properties['agent_config']['extra']['proxy']
            proxy_deployment = remote['deployment']
            proxy_node_instance = remote['node_instance']
            proxy_tenant = remote.get('tenant')
        except KeyError:
            # no queue information and no proxy - cannot continue
            missing = 'queue' if not cloudify_agent.get('queue') else 'name'
            raise exceptions.NonRecoverableError(
                'Missing cloudify_agent.{0} runtime information. '
                'This most likely means that the Compute node was '
                'never started successfully'.format(missing))
        else:
            # the agent does proxy to another, recursively get from that one
            # (if the proxied-to agent in turn proxies to yet another one,
            # look up that one, etc)
            return self._get_agent_settings(
                node_instance_id=proxy_node_instance,
                deployment_id=proxy_deployment,
                tenant=proxy_tenant)
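The proxy branch above is driven entirely by node properties; a hypothetical agent_config fragment that would route the lookup to an agent owned by another deployment (all values illustrative):

properties = {
    'agent_config': {
        'extra': {
            'proxy': {
                'deployment': 'infra',         # deployment owning the real agent
                'node_instance': 'vm_abc123',  # host instance running the agent
                'tenant': 'default_tenant',    # optional; current tenant if omitted
            },
        },
    },
}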
Example #52
def stage_db_schema_get_current_revision():
    """Get stage database schema revision.

    :returns: Current revision, or None on non-premium editions
    :rtype: str
    """
    client = manager.get_rest_client()
    version = client.manager.get_version()
    if version['edition'] != 'premium':
        return None
    output = subprocess.check_output([
        '/opt/nodejs/bin/node',
        '/opt/cloudify-stage/backend/migration.js',
        'current',
    ]).decode('utf-8')
    return output.strip()
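The stage revision can be captured during snapshot creation the same way as the composer revision in Example #47; again the metadata key is illustrative:

stage_revision = stage_db_schema_get_current_revision()
if stage_revision is not None:  # None on non-premium editions
    metadata['stage_schema_revision'] = stage_revision  # hypothetical key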
Example #53
def get_secret(manager_host, tenant_name, manager_username, manager_password,
               secret_name, **kwargs):
    value = None
    try:
        # Equivalent raw REST call, kept for reference (unused):
        # resp = requests.get(
        #     'http://' + manager_host + '/api/v3.1/secrets/' + secret_name,
        #     headers={'Tenant': tenant_name},
        #     auth=(manager_username, manager_password))
        # value = json.loads(resp.content)
        client = get_rest_client()
        value = client.secrets.get(key=secret_name)
        ctx.logger.debug('response content {}'.format(value))
    except Exception as e:
        raise NonRecoverableError('Exception happened: {}'.format(
            getattr(e, 'message', repr(e))))
    return value
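A hedged usage sketch: on the rest-client path the host and credential parameters are unused, and the return value is the secret resource itself (the secret text sits under its value key); the secret name is illustrative:

secret = get_secret(None, None, None, None, secret_name='db_password')
ctx.logger.info('Secret {0} fetched (value not logged).'.format(secret['key']))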
Example #54
def _is_gateway_locked(ctx):
    rest = None
    try:
        rest = get_rest_client()
    except KeyError:
        pass
    if rest:
        node_instances = rest.node_instances.list(ctx.deployment.id)
    elif ctx.deployment.id == 'local':
        storage = ctx._endpoint.storage
        node_instances = storage.get_node_instances()
    else:
        return False
    for instance in node_instances:
        rt_properties = instance['runtime_properties']
        if rt_properties.get(GATEWAY_LOCK):
            return True
    return False
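GATEWAY_LOCK itself is just a runtime-property key; a hypothetical counterpart that takes the lock from within an operation, assuming callers re-check under retry to tolerate races:

def _lock_gateway(ctx):
    # Mark the gateway busy; _is_gateway_locked treats any truthy value
    # as locked.
    ctx.instance.runtime_properties[GATEWAY_LOCK] = True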
Example #55
def install(ctx, plugin, **_):
    try:
        execution_result = _operate_on_plugin(ctx, plugin, 'install')
    except Exception as e:
        ctx.send_event("The plugin '{0}' failed to install. "
                       "Sending a 'force plugin uninstall' request..."
                       "".format(plugin['id']))
        client = get_rest_client()
        plugins = client.plugins.list(id=plugin['id'])
        if plugins:
            client.plugins.delete(plugin_id=plugin['id'], force=True)
            ctx.send_event("Sent a 'force plugin uninstall' request for "
                           "plugin '{0}'.".format(plugin['id']))
        else:
            ctx.send_event("The plugin {0} entry doesn't exist."
                           "".format(plugin['id']))
        raise e
    return execution_result
Example #56
    def __init__(self, operation_inputs):
        full_operation_name = ctx.operation.name
        self.operation_name = full_operation_name.split('.').pop()

        # Cloudify client setup
        self.client_config = self._get_desired_operation_input(
            'client', operation_inputs)

        if self.client_config:
            self.client = CloudifyClient(**self.client_config)
        else:
            self.client = manager.get_rest_client()

        self.config = self._get_desired_operation_input(
            'resource_config', operation_inputs)

        self.deployment = self.config.get('deployment', {})
        self.deployment_id = self.deployment.get('id', '')
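The _get_desired_operation_input helper is not shown in this excerpt; a plausible sketch, assuming it prefers an explicit operation input and falls back to the node property of the same name:

    @staticmethod
    def _get_desired_operation_input(key, inputs):
        # Operation inputs win; node properties are the fallback.
        if key in inputs:
            return inputs[key]
        return ctx.node.properties.get(key)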
Example #57
def restore(snapshot_id, recreate_deployments_envs, config, force, **kwargs):
    mappings = {
        '3.3': _restore_snapshot_format_3_3,
        '3.2': _restore_snapshot_format_3_2
    }

    config = _DictToAttributes(config)

    _assert_clean_elasticsearch(log_warning=force)

    tempdir = tempfile.mkdtemp('-snapshot-data')

    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(file_server_root,
                                     config.file_server_snapshots_folder)

        snapshot_path = os.path.join(snapshots_dir, snapshot_id,
                                     '{0}.zip'.format(snapshot_id))

        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)

        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)

        from_version = metadata[_M_VERSION]

        if from_version not in mappings:
            raise NonRecoverableError('This manager cannot restore a snapshot '
                                      'of manager {0}'.format(from_version))
        client = get_rest_client()
        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.send_event(
            'Starting to restore snapshot of manager {0}'.format(from_version))
        mappings[from_version](config, tempdir, metadata)

        if recreate_deployments_envs:
            recreate_deployments_environments(existing_deployments_ids)

        ctx.send_event('Successfully restored snapshot of manager {0}'.format(
            from_version))
    finally:
        shutil.rmtree(tempdir)
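_DictToAttributes is a small adapter so configuration keys read as attributes (config.file_server_root above); a minimal sketch, assuming a flat configuration dict:

class _DictToAttributes(object):
    def __init__(self, d):
        self._d = d

    def __getattr__(self, name):
        # A missing configuration entry surfaces as a KeyError.
        return self._d[name]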
Example #58
def restore(snapshot_id, recreate_deployments_envs, config, force, timeout,
            **kwargs):
    ctx.logger.info('Restoring snapshot {0}'.format(snapshot_id))
    ctx.logger.debug('Restoring snapshot config: {0}'.format(config))
    config = DictToAttributes(config)

    _assert_clean_postgres(log_warning=force)

    tempdir = tempfile.mkdtemp('-snapshot-data')
    try:
        file_server_root = config.file_server_root
        snapshots_dir = os.path.join(file_server_root,
                                     config.file_server_snapshots_folder)
        snapshot_path = os.path.join(snapshots_dir, snapshot_id,
                                     '{0}.zip'.format(snapshot_id))
        with zipfile.ZipFile(snapshot_path, 'r') as zipf:
            zipf.extractall(tempdir)
        with open(os.path.join(tempdir, _METADATA_FILE), 'r') as f:
            metadata = json.load(f)
        client = get_rest_client()
        manager_version = _get_manager_version(client)
        from_version = ManagerVersion(metadata[_M_VERSION])
        ctx.logger.info('Manager version = {0}, snapshot version = {1}'.format(
            str(manager_version), str(from_version)))
        if from_version.greater_than(manager_version):
            raise NonRecoverableError(
                'Cannot restore a newer manager\'s snapshot on this manager '
                '[{0} > {1}]'.format(str(from_version), str(manager_version)))
        existing_deployments_ids = [d.id for d in client.deployments.list()]
        ctx.logger.info(
            'Starting to restore snapshot of manager {0}'.format(from_version))

        version_at_least_4 = _version_at_least(from_version, '4.0.0')
        plugins_to_install = _restore_snapshot(config, tempdir, metadata,
                                               timeout, version_at_least_4)
        if plugins_to_install:
            _install_plugins(plugins_to_install)
        if recreate_deployments_envs:
            _recreate_deployments_environments(existing_deployments_ids)
        ctx.logger.info('Successfully restored snapshot of manager {0}'.format(
            from_version))
    finally:
        ctx.logger.debug('Removing temp dir: {0}'.format(tempdir))
        shutil.rmtree(tempdir)
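_version_at_least is referenced but not shown; a one-line sketch consistent with the ManagerVersion.greater_than comparison used above:

def _version_at_least(version, minimum):
    # True when `version` is at least `minimum`.
    return not ManagerVersion(minimum).greater_than(version)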
Example #59
    def _restore_deployment_envs(self, postgres):
        deps = utils.get_dep_contexts(self._snapshot_version)
        token_info = postgres.get_deployment_creator_ids_and_tokens()
        deps_with_failed_plugins = Queue.Queue()
        failed_deployments = Queue.Queue()
        threads = list()

        for tenant, deployments in deps:
            ctx.logger.info(
                'Restoring deployment environments for {tenant}'.format(
                    tenant=tenant,
                )
            )
            tenant_client = get_rest_client(tenant=tenant)

            for deployment_id, dep_ctx in deployments.iteritems():
                # Task graph is created and executed by threads to
                # shorten restore time significantly
                wf_ctx = current_workflow_ctx.get_ctx()
                wf_parameters = current_workflow_ctx.get_parameters()
                self._semaphore.acquire()
                t = threading.Thread(target=self._get_and_execute_task_graph,
                                     args=(token_info, deployment_id, dep_ctx,
                                           tenant, tenant_client, wf_ctx,
                                           wf_parameters,
                                           deps_with_failed_plugins,
                                           failed_deployments)
                                     )
                t.setDaemon(True)
                threads.append(t)
                t.start()

        for t in threads:
            t.join()

        if not failed_deployments.empty():
            deployments = list(failed_deployments.queue)
            raise NonRecoverableError('Failed to restore snapshot, the '
                                      'following deployment environments were'
                                      ' not restored: {0}. See exception'
                                      ' tracebacks logged above for more'
                                      ' details.'.format(deployments))

        self._log_final_information(deps_with_failed_plugins)
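_get_and_execute_task_graph is the thread body; the semaphore acquired before each thread starts must be released by the worker, and failures go into the queues rather than raising across threads. A hedged sketch of that contract (the restore logic itself is elided):

    def _get_and_execute_task_graph(self, token_info, deployment_id, dep_ctx,
                                    tenant, tenant_client, wf_ctx,
                                    wf_parameters, deps_with_failed_plugins,
                                    failed_deployments):
        try:
            with current_workflow_ctx.push(wf_ctx, wf_parameters):
                # ... build and execute the tasks graph for this deployment,
                # recording plugin failures into deps_with_failed_plugins ...
                pass
        except Exception:
            failed_deployments.put(deployment_id)
        finally:
            # Pair with the acquire() in _restore_deployment_envs.
            self._semaphore.release()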
Example #60
    def _wagon_install(self, plugin, args):
        client = get_rest_client()
        wagon_dir = tempfile.mkdtemp(prefix='{0}-'.format(plugin.id))
        wagon_path = os.path.join(wagon_dir, 'wagon.tar.gz')
        try:
            self.logger.debug('Downloading plugin {0} from manager into {1}'
                              .format(plugin.id, wagon_path))
            client.plugins.download(plugin_id=plugin.id,
                                    output_file=wagon_path)
            self.logger.debug('Installing plugin {0} using wagon'
                              .format(plugin.id))
            w = wagon.Wagon(source=wagon_path)
            w.install(ignore_platform=True,
                      install_args=args,
                      virtualenv=VIRTUALENV)
        finally:
            self.logger.debug('Removing directory: {0}'
                              .format(wagon_dir))
            self._rmtree(wagon_dir)