def start(ctx, **_):
    """Update IP runtime property"""
    azure_config = ctx.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    name = ctx.instance.runtime_properties.get('name')
    resource_group_name = utils.get_resource_group(ctx)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    public_ip_address = PublicIPAddress(azure_config, ctx.logger, api_version)
    try:
        result = public_ip_address.get(resource_group_name, name)
        ctx.instance.runtime_properties[PUBLIC_IP_PROPERTY] = \
            result.get('ip_address')
    except CloudError:
        raise cfy_exc.NonRecoverableError(
            "Resource with name {0} doesn't exist".format(name))
def detach_nic_from_backend_pool(ctx, **_):
    """
        Detaches a Network Interface Card's IPConfigurations
        from a Load Balancer Backend Pool
    """
    # Get the ID of the Backend Pool
    be_pool_id = {'id': ctx.target.instance.runtime_properties['resource_id']}
    # Get an interface to the Network Interface Card
    azure_config = ctx.source.node.properties['azure_config']
    if not azure_config.get("subscription_id"):
        azure_config = ctx.source.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    resource_group_name = ctx.source.node.properties['resource_group_name']
    name = ctx.source.instance.runtime_properties['name']
    network_interface_card = NetworkInterfaceCard(azure_config, ctx.logger)
    # Get the existing NIC IPConfigurations
    nic_data = network_interface_card.get(resource_group_name, name)
    nic_ip_cfgs = nic_data.get('ip_configurations', list())
    # Remove the Backend Pool from the NIC IPConfigurations
    for ip_idx, _ in enumerate(nic_ip_cfgs):
        nic_pools = nic_ip_cfgs[ip_idx].get(LB_ADDRPOOLS_KEY, list())
        for pool_idx, nic_pool in enumerate(nic_pools):
            if nic_pool != be_pool_id:
                continue
            del nic_pools[pool_idx]
            nic_ip_cfgs[ip_idx][LB_ADDRPOOLS_KEY] = nic_pools
    # Update the NIC IPConfigurations
    nic_params = {
        'ip_configurations': nic_ip_cfgs
    }
    try:
        network_interface_card.create_or_update(resource_group_name, name,
                                                nic_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "detach nic_to_backend_pool '{0}' "
            "failed with this error : {1}".format(name,
                                                  cr.message)
            )
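
As a side note, the removal loop above deletes from nic_pools while iterating over it; a filter avoids the index bookkeeping. A minimal sketch under the same data shapes (LB_ADDRPOOLS_KEY and the ip_configurations list come from the code above; the helper name is illustrative, not part of the plugin):

def _strip_backend_pool(nic_ip_cfgs, be_pool_id):
    """Return ip_configurations with the given backend pool reference removed."""
    for ip_cfg in nic_ip_cfgs:
        pools = ip_cfg.get(LB_ADDRPOOLS_KEY, list())
        # keep every pool reference except the one being detached
        ip_cfg[LB_ADDRPOOLS_KEY] = [p for p in pools if p != be_pool_id]
    return nic_ip_cfgs
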
def create(ctx, **_):
    """Uses an existing, or creates a new, Network Security Group"""
    # Create a resource (if necessary)
    azure_config = ctx.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    name = utils.get_resource_name(ctx)
    resource_group_name = utils.get_resource_group(ctx)
    nsg_params = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
    }
    nsg_params = \
        utils.handle_resource_config_params(nsg_params,
                                            ctx.node.properties.get(
                                                'resource_config', {}))
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_NETWORK)
    network_security_group = NetworkSecurityGroup(azure_config, ctx.logger,
                                                  api_version)
    # clean empty values from params
    nsg_params = \
        utils.cleanup_empty_params(nsg_params)

    try:
        result = \
            network_security_group.create_or_update(
                resource_group_name,
                name,
                nsg_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "create network_security_group '{0}' "
            "failed with this error : {1}".format(name, cr.message))

    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")
Example #4
def detach_route_table(ctx, **_):
    """Detaches a Route Table to the Subnet"""
    # Detach
    azure_config = ctx.target.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.target.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    resource_group_name = utils.get_resource_group(ctx.target)
    vnet_name = ctx.target.instance.runtime_properties.get('virtual_network')
    name = ctx.target.instance.runtime_properties.get('name')
    subnet_params = {'route_table': None}
    subnet = Subnet(azure_config, ctx.logger)
    try:
        subnet.create_or_update(resource_group_name, vnet_name, name,
                                subnet_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "detach_route_table from subnet '{0}' "
            "failed with this error : {1}".format(name, cr.message))
Example #5
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    Create handler configuration files.
    Copy over a handler if a path to its file was provided.
    Return the list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        # If we do not have a real manager cloudify_agent is expected to be an
        # empty dict. This will be handled by get_broker_credentials.
        cloudify_agent = ctx.bootstrap_context.cloudify_agent

        broker_user, broker_pass = utils.internal.get_broker_credentials(
            cloudify_agent
        )

        config_changes = {
            'server': cloudify_agent.broker_ip,
            'user': broker_user,
            'password': broker_pass,
        }

        handlers['cloudify_handler.cloudify.CloudifyHandler'][
            'config'].update(config_changes)

    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop.keys():
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path, '{0}.conf'.format(
            name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    return handlers.keys()
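
For reference, the split('.') indexing above maps a dotted handler name to the generated file names; the handlers dict below is a hypothetical illustration, not the real DEFAULT_HANDLERS:

handlers = {
    'cloudify_handler.cloudify.CloudifyHandler': {
        'path': 'handlers/cloudify.py',       # optional, copied into handlers_path
        'config': {'server': 'localhost'},    # written to the .conf file
    },
}
name = 'cloudify_handler.cloudify.CloudifyHandler'
assert name.split('.')[-2] == 'cloudify'         # -> <handlers_path>/cloudify.py
assert name.split('.')[-1] == 'CloudifyHandler'  # -> <config_path>/CloudifyHandler.conf
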
Example #6
def create(vca_client, **kwargs):
    """
        Create a server from a template.
        If use_external_resource is set, return without creating one,
        e.g.:
        {
            'management_network': '_management_network',
            'server': {
                'template': 'template',
                'catalog': 'catalog',
                'guest_customization': {
                    'pre_script': 'pre_script',
                    'post_script': 'post_script',
                    'admin_password': '******',
                    'computer_name': 'computer'

                }
            }
        }
    """
    config = get_vcloud_config()
    server = {
        'name': ctx.instance.id,
    }
    server.update(ctx.node.properties.get('server', {}))
    transform_resource_name(server, ctx)

    if ctx.node.properties.get('use_external_resource'):
        res_id = ctx.node.properties['resource_id']
        ctx.instance.runtime_properties[VCLOUD_VAPP_NAME] = res_id
        vdc = vca_client.get_vdc(config['vdc'])
        if not vca_client.get_vapp(vdc, res_id):
            raise cfy_exc.NonRecoverableError(
                "Unable to find external vAPP server resource {0}."
                .format(res_id))
        server.update({'name': res_id})
        ctx.logger.info(
            "External resource {0} has been used".format(res_id))
    else:
        _create(vca_client, config, server)
def creation_validation(vca_client, **kwargs):
    """
        validate firewall rules for node
    """
    gateway = get_gateway(vca_client, _get_gateway_name(ctx.node.properties))
    if not gateway.is_fw_enabled():
        raise cfy_exc.NonRecoverableError(
            "Gateway firewall is disabled. Please, enable firewall.")
    rules = get_mandatory(ctx.node.properties, 'rules')
    for rule in rules:
        description = rule.get("description")
        if description and not isinstance(description, basestring):
            raise cfy_exc.NonRecoverableError(
                "Parameter 'description' must be string.")

        source = rule.get("source")
        if source:
            if not isinstance(source, basestring):
                raise cfy_exc.NonRecoverableError(
                    "Parameter 'source' must be valid IP address string.")
            if not _is_literal_ip(source):
                check_ip(source)

        utils.check_port(rule.get('source_port'))

        destination = rule.get('destination')
        if destination:
            if not isinstance(destination, basestring):
                raise cfy_exc.NonRecoverableError(
                    "Parameter 'destination' must be valid IP address string.")
            if not _is_literal_ip(destination):
                check_ip(destination)

        utils.check_port(rule.get('destination_port'))

        utils.check_protocol(rule.get('protocol'))

        action = get_mandatory(rule, "action")
        if (not isinstance(action, basestring)
                or action.lower() not in ACTIONS):
            raise cfy_exc.NonRecoverableError(
                "Action must be on of{0}.".format(ACTIONS))

        log = rule.get('log_traffic')
        if log and not isinstance(log, bool):
            raise cfy_exc.NonRecoverableError(
                "Parameter 'log_traffic' must be boolean.")
def delete_rule(ctx, **_):
    """
        Deletes a Load Balancer Rule
        TODO: Rewrite this to occur inside of a Relationship Operation
    """
    if ctx.node.properties.get('use_external_resource', False):
        return
    # Get an interface to the Load Balancer
    azure_config = ctx.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    resource_group_name = utils.get_resource_group(ctx)
    lb_rel = utils.get_relationship_by_type(
        ctx.instance.relationships,
        constants.REL_CONTAINED_IN_LB)
    lb_name = utils.get_resource_name(lb_rel.target)
    load_balancer = LoadBalancer(azure_config, ctx.logger)
    name = ctx.instance.runtime_properties.get('name')
    # Get the existing rules
    lb_data = load_balancer.get(resource_group_name, lb_name)
    lb_rules = lb_data.get('load_balancing_rules', list())
    for idx, rule in enumerate(lb_rules):
        if rule.get('name') == name:
            del lb_rules[idx]
    # Update the Load Balancer with the new rules list
    lb_params = {
        'load_balancing_rules': lb_rules
    }
    try:
        load_balancer.create_or_update(resource_group_name, lb_name, lb_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "delete load_balancing_rules '{0}' "
            "failed with this error : {1}".format(name,
                                                  cr.message)
            )
def create(ctx, **_):
    """Uses an existing, or creates a new, Availability Set"""
    azure_config = ctx.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    name = utils.get_resource_name(ctx)
    resource_group_name = utils.get_resource_group(ctx)
    resource_config = ctx.node.properties.get('resource_config')
    availability_set_conf = {
        'location': ctx.node.properties.get('location'),
        'tags': ctx.node.properties.get('tags'),
    }
    availability_set_conf = \
        utils.handle_resource_config_params(availability_set_conf,
                                            resource_config)
    api_version = \
        ctx.node.properties.get('api_version', constants.API_VER_COMPUTE)
    availability_set = AvailabilitySet(azure_config, ctx.logger, api_version)
    # clean empty values from params
    availability_set_conf = utils.cleanup_empty_params(availability_set_conf)

    try:
        result = \
            availability_set.create_or_update(resource_group_name,
                                              name,
                                              availability_set_conf)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "create availability_set '{0}' "
            "failed with this error : {1}".format(name,
                                                  cr.message)
            )

    ctx.instance.runtime_properties['resource_group'] = resource_group_name
    ctx.instance.runtime_properties['resource'] = result
    ctx.instance.runtime_properties['resource_id'] = result.get("id", "")
def _stream_download(ctx, conn, volume, url):
    res = requests.head(url, allow_redirects=True)
    res.raise_for_status()
    allocation = int(res.headers.get('Content-Length', 0))
    if allocation <= 0 or res.headers.get('Accept-Ranges') != 'bytes':
        raise cfy_exc.NonRecoverableError(
            'Failed to download volume.'
        )
    ctx.logger.info("Download: {allocation}"
                    .format(allocation=allocation))

    stream = conn.newStream(0)
    volume.upload(stream, 0, allocation, 0)
    start_range = 0
    while start_range < allocation:
        stop_range = start_range + STEP_DOWNLOAD
        if stop_range > (allocation - 1):
            stop_range = allocation - 1
        ctx.logger.info(
            "Range: {start}..{stop}/{allocation}: {place}%"
            .format(
                start=start_range,
                stop=stop_range,
                allocation=allocation,
                place=(100 * stop_range)/allocation))
        res = requests.get(
            url,
            headers={
                "Range": "bytes={start}-{stop}".format(
                    start=start_range,
                    stop=stop_range)},
            allow_redirects=True,
            stream=True)
        res.raise_for_status()
        for chunk in res.iter_content(chunk_size=None):
            # mark as downloaded
            start_range += len(chunk)
            stream.send(chunk)
    stream.finish()
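
The Range arithmetic above can be checked in isolation. A small sketch, assuming the server honors each inclusive Range request exactly (step plays the role of the STEP_DOWNLOAD constant):

def byte_ranges(allocation, step):
    """Yield the inclusive (start, stop) byte ranges requested above."""
    start = 0
    while start < allocation:
        stop = min(start + step, allocation - 1)
        yield start, stop
        start = stop + 1

# e.g. list(byte_ranges(10, 4)) == [(0, 4), (5, 9)], covering all 10 bytes
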
Example #11
def resize(storage_client, **kwargs):
    vm_id = ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_ID]
    vm_name = ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_NAME]
    storage_file_name = \
        ctx.instance.runtime_properties[VSPHERE_STORAGE_FILE_NAME]
    storage_size = ctx.instance.runtime_properties.get('storage_size')
    if not storage_size:
        raise cfy_exc.NonRecoverableError(
            'Error while trying to resize storage: new storage size wasn\'t'
            ' specified.')
    ctx.logger.info("Resizing storage {file} on {vm} to {new_size}".format(
        file=storage_file_name,
        vm=vm_name,
        new_size=storage_size,
    ))
    storage_client.resize_storage(vm_id, storage_file_name, storage_size)
    ctx.logger.info(
        "Successfully resized storage {file} on {vm} to {new_size}".format(
            file=storage_file_name,
            vm=vm_name,
            new_size=storage_size,
        ))
Example #12
    def __call__(self, subgraph):
        graph = subgraph.graph
        for task in subgraph.tasks.values():
            subgraph.remove_task(task)
        if not subgraph.containing_subgraph:
            result = workflow_tasks.HandlerResult.retry()
            if self.on_retry == 'reinstall':
                result.retried_task = reinstall_node_instance_subgraph(
                    self.instance, graph)
            elif self.on_retry == 'uninstall':
                result.retried_task = uninstall_node_instance_subgraph(
                    self.instance, graph)
            else:
                raise exceptions.NonRecoverableError(
                    'subgraph {0} on_failure: unknown retry method {1}'.format(
                        subgraph, self.on_retry))
            result.retried_task.current_retries = subgraph.current_retries + 1
        else:
            result = workflow_tasks.HandlerResult.ignore()
            subgraph.containing_subgraph.failed_task = subgraph.failed_task
            subgraph.containing_subgraph.set_state(workflow_tasks.TASK_FAILED)
        return result
def get_deployments_from_group(group, rest_client):
    """ Get a deployment group object.
    :param group: The ID of the group.
    :type group: str
    :param rest_client: A Cloudify REST client.
    :type rest_client: cloudify_rest_client.client.CloudifyClient
    :return: request's JSON response
    :rtype: dict
    """
    attempts = 0
    while True:
        try:
            return rest_client.deployment_groups.get(group)
        except CloudifyClientError as e:
            attempts += 1
            if attempts > 15:
                raise cfy_exc.NonRecoverableError(
                    'Maximum attempts waiting '
                    'for deployment group {group}" {e}.'.format(group=group,
                                                                e=e))
            sleep(5)
            continue
Example #14
def stop_diamond(conf_path):
    config_file = os.path.join(conf_path, CONFIG_NAME)
    pid = get_pid(config_file)
    if pid:
        need_kill = True
        try:
            diamond_process = Process(pid)
            diamond_process.terminate()
            diamond_process.wait(timeout=DEFAULT_TIMEOUT)
            need_kill = diamond_process.is_running()
        except Error:
            pass
        if need_kill:
            call(["sudo", "kill", str(pid)])
            # diamond deletes the pid file, even if killed
            for _ in range(DEFAULT_TIMEOUT):
                pid = get_pid(config_file)
                if not pid:
                    return
                sleep(1)
    else:
        raise exceptions.NonRecoverableError('Failed reading diamond pid file')
Example #15
def attach_network_security_group(ctx, **_):
    """Attaches a Network Security Group (source) to the Subnet (target)"""
    nsg_id = ctx.source.instance.runtime_properties.get("resource_id", "")
    # Attach
    azure_config = ctx.target.node.properties.get('azure_config')
    if not azure_config.get("subscription_id"):
        azure_config = ctx.target.node.properties.get('client_config')
    else:
        ctx.logger.warn("azure_config is deprecated please use client_config, "
                        "in later version it will be removed")
    resource_group_name = utils.get_resource_group(ctx.target)
    vnet_name = ctx.target.instance.runtime_properties.get('virtual_network')
    name = ctx.target.instance.runtime_properties.get('name')
    subnet_params = {'network_security_group': {'id': nsg_id}}
    subnet = Subnet(azure_config, ctx.logger)
    try:
        subnet.create_or_update(resource_group_name, vnet_name, name,
                                subnet_params)
    except CloudError as cr:
        raise cfy_exc.NonRecoverableError(
            "attach_network_security_group to subnet '{0}' "
            "failed with this error : {1}".format(name, cr.message))
Example #16
def wait_for_deployment(**kwargs):
    ctx.logger.info("Entering wait_for_deployment event.")
    if 'deployment_id' not in ctx.node.properties:
        raise exceptions.NonRecoverableError("Deployment ID not specified.")

    client = manager.get_rest_client()
    timeout = ctx.node.properties['timeout']
    deployment_id = ctx.node.properties['deployment_id']

    def _check_if_deployment_is_ready():
        _execs = client.executions.list(deployment_id=deployment_id)
        ctx.logger.info("Deployment executions statuses: {0}.".format(
            str([[_e['workflow_id'], _e['status']] for _e in _execs])))
        ctx.logger.info("Are all executions were finished? {0}".format(
            [str(_e['status']) == "terminated" for _e in _execs]))
        return any([str(_e['status']) == "terminated" for _e in _execs])

    poll_until_with_timeout(_check_if_deployment_is_ready,
                            expected_result=True,
                            timeout=timeout)

    ctx.logger.info("Exiting wait_for_deployment event.")
Example #17
    def _serialize_handler(self, handler):
        """Serialize the on_failure/on_success handler.

        For functions, just their qualified import name will be stored
        (and later restored by simply importing it).
        For class instances, the qualified import name and optionally the
        __init__ kwargs returned by a .dump method on that class.
        """
        if not handler:
            return None
        if isinstance(handler, types.FunctionType):
            path = '{0}.{1}'.format(handler.__module__, handler.__name__)
        else:
            path = '{0}.{1}'.format(handler.__class__.__module__,
                                    handler.__class__.__name__)
        if '<' in path:  # eg. "f.<locals>.g"
            raise exceptions.NonRecoverableError(
                'Cannot serialize handler {0}'.format(handler))
        serialized = {'path': path}
        if hasattr(handler, 'dump'):
            serialized.update(handler.dump())
        return serialized
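
The docstring notes that serialized handlers are later restored by importing the stored path; a minimal counterpart sketch under that assumption (the remaining keys, produced by .dump(), are treated as __init__ kwargs; this helper is illustrative, not the library's actual code):

import importlib

def _deserialize_handler(serialized):
    """Rebuild a handler from the {'path': ..., **kwargs} dict produced above."""
    if not serialized:
        return None
    serialized = dict(serialized)
    module_name, _, attr_name = serialized.pop('path').rpartition('.')
    target = getattr(importlib.import_module(module_name), attr_name)
    # plain functions are returned as-is; classes are re-instantiated with kwargs
    return target(**serialized) if isinstance(target, type) else target
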
Example #18
def config_handlers(ctx, handlers, config_path, handlers_path):
    """
    Create handler configuration files.
    Copy over a handler if a path to its file was provided.
    Return the list of active handlers.
    """
    if handlers is None:
        handlers = copy_objects.deepcopy(DEFAULT_HANDLERS)

        agent_workdir = _calc_workdir()
        conf_file_path = os.path.join(agent_workdir, 'broker_config.json')
        if os.path.isfile(conf_file_path):
            with open(conf_file_path) as conf_handle:
                agent_config = json.load(conf_handle)

            config_changes = {
                'server': agent_config['broker_hostname'],
                'user': agent_config['broker_username'],
                'password': agent_config['broker_password'],
                'broker_cert_path': agent_config['broker_cert_path'],
                'broker_ssl_enabled': agent_config['broker_ssl_enabled'],
            }

            handlers['cloudify_handler.cloudify.CloudifyHandler'][
                'config'].update(config_changes)
    elif not handlers:
        raise exceptions.NonRecoverableError('Empty handlers dict')

    for name, prop in handlers.items():
        if 'path' in prop:
            handler_file = os.path.join(handlers_path,
                                        '{0}.py'.format(name.split('.')[-2]))
            ctx.download_resource(prop['path'], handler_file)

        path = os.path.join(config_path,
                            '{0}.conf'.format(name.split('.')[-1]))
        write_config(path, prop.get('config', {}))

    return list(handlers.keys())
Example #19
def nsx_login(kwargs):
    """Use values form properties/of file for login to nsx"""
    if ctx.type == NODE_INSTANCE:
        nsx_auth = _get_properties('nsx_auth', kwargs)
    else:
        nsx_auth = kwargs.get('nsx_auth')

    ctx.logger.info("NSX login...")

    # get file config
    cfg_auth = _nsx_login_file()
    cfg_auth.update(nsx_auth)

    # check values
    user = cfg_auth.get('username')
    password = cfg_auth.get('password')
    ip = cfg_auth.get('host')

    # if the node is contained in another node, try to overwrite the ip
    if not ip and ctx.type == NODE_INSTANCE:
        ip = ctx.instance.host_ip
        ctx.logger.info("Used host from container: %s" % ip)

    ctx.logger.info("Used %s@%s" % (user, ip))

    # check minimal amount of credentials
    if not ip or not user or not password:
        raise cfy_exc.NonRecoverableError("please check your credentials")

    raml_file = cfg_auth.get('raml')
    if not raml_file:
        resource_dir = resource_filename(__name__, 'api_spec')
        raml_file = '{}/nsxvapi.raml'.format(resource_dir)
        ctx.logger.info("Will be used internal: %s" % raml_file)

    client = NsxClient(raml_file, ip, user, password)
    ctx.logger.info("NSX logged in")
    return client
Example #20
def _validate_not_cancelled(handler, ctx):
    """
    Validate whether the currently running task has been cancelled
    or not
    :param handler:
    :param ctx:
    """
    # We also need to handle old tasks that are still in the queue and not
    # yet picked up by the worker, so that we can ignore them when the state
    # of the execution is cancelled, and ignore pending tasks picked up by
    # the worker but not yet executed. Moreover, we need to handle the case
    # where a resume workflow is running while some old operation tasks are
    # still in the queue holding an invalid execution token, which could
    # raise a 401 error
    # Need to use the context associated with that task
    with state.current_ctx.push(handler.ctx):
        try:
            # Get the status of the current execution so that we can
            # tell if the current running task can be run or not
            current_execution = handler.ctx.get_execution(
                ctx.get('execution_id'))
            if current_execution:
                logger.info('The current status of the execution is {0}'
                            ''.format(current_execution.status))
                # If the current execution is cancelled, this task was still
                # in the queue when the previous cancel operation was
                # triggered, so we need to avoid running such tasks from the
                # cancelled execution
                if current_execution.status == ExecutionState.CANCELLED:
                    raise exceptions.ProcessKillCancelled()
            else:
                raise exceptions.NonRecoverableError(
                    'No execution available')
        except UserUnauthorizedError:
            # This means the execution token is no longer valid since a new
            # token was re-generated because of a resume workflow
            raise exceptions.ProcessKillCancelled()
def install_deployment(deployment_id, rest_client):
    """ Execute install workflow on a deployment.
    :param deployment_id: An existing deployment ID.
    :type deployment_id: str
    :param rest_client: A Cloudify REST client.
    :type rest_client: cloudify_rest_client.client.CloudifyClient
    :return: request's JSON response
    :rtype: dict
    """
    attempts = 0
    while True:
        try:
            return rest_client.executions.start(deployment_id, 'install')
        except (DeploymentEnvironmentCreationPendingError,
                DeploymentEnvironmentCreationInProgressError) as e:
            attempts += 1
            if attempts > 15:
                raise cfy_exc.NonRecoverableError(
                    'Maximum attempts waiting '
                    'for deployment {deployment_id}" {e}.'.format(
                        deployment_id=deployment_id, e=e))
            sleep(5)
            continue
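
get_deployments_from_group above and install_deployment here repeat the same bounded retry loop; a sketch of how it could be factored out (the helper name and defaults are illustrative, not part of the plugin):

from time import sleep

from cloudify import exceptions as cfy_exc


def retry_call(func, retried_exceptions, max_attempts=15, delay=5):
    """Call func(), retrying on retried_exceptions up to max_attempts times."""
    attempts = 0
    while True:
        try:
            return func()
        except retried_exceptions as e:
            attempts += 1
            if attempts > max_attempts:
                raise cfy_exc.NonRecoverableError(
                    'Maximum attempts exceeded: {0}'.format(e))
            sleep(delay)

# usage sketch:
# return retry_call(lambda: rest_client.executions.start(deployment_id, 'install'),
#                   (DeploymentEnvironmentCreationPendingError,
#                    DeploymentEnvironmentCreationInProgressError))
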
def handle_userdata(server):

    existing_userdata = server.get('userdata')
    install_agent_userdata = ctx.agent.init_script()

    if not (existing_userdata or install_agent_userdata):
        return

    if isinstance(existing_userdata, dict):
        ud_type = existing_userdata['type']
        if ud_type not in userdata_handlers:
            raise exceptions.NonRecoverableError(
                "Invalid type '{0}' for server userdata)".format(ud_type))
        existing_userdata = userdata_handlers[ud_type](existing_userdata)

    if not existing_userdata:
        final_userdata = install_agent_userdata
    elif not install_agent_userdata:
        final_userdata = existing_userdata
    else:
        final_userdata = create_multi_mimetype_userdata(
            [existing_userdata, install_agent_userdata])
    server['userdata'] = final_userdata
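
For context, the dict form of userdata carries a 'type' key that selects an entry in userdata_handlers; the handler name and extra field below are hypothetical, purely to show the shape the code above expects:

server = {
    'userdata': {
        'type': 'http',                       # hypothetical key of userdata_handlers
        'url': 'http://example.com/init.sh',  # hypothetical handler-specific field
    },
}
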
Example #23
def start(vca_client, **kwargs):
    """
    Power on the server and wait for network connectivity to the host
    """
    if ctx.node.properties.get('use_external_resource'):
        ctx.logger.info('not starting server since an external server is '
                        'being used')
    else:
        vapp_name = get_vapp_name(ctx.instance.runtime_properties)
        config = get_vcloud_config()
        vdc = vca_client.get_vdc(config['vdc'])
        vapp = vca_client.get_vapp(vdc, vapp_name)
        if _vapp_is_on(vapp) is False:
            ctx.logger.info("Power-on VApp {0}".format(vapp_name))
            task = vapp.poweron()
            if not task:
                raise cfy_exc.NonRecoverableError("Could not power-on vApp")
            wait_for_task(vca_client, task)

    if not _get_state(vca_client):
        return ctx.operation.retry(
            message="Waiting for VM's configuration to complete",
            retry_after=5)
Example #24
def create(ctx, **kwargs):
    properties = {}
    properties.update(ctx.node.properties)
    properties.update(kwargs)

    ctx.logger.info("Create: {0}".format(repr(properties['name'])))

    deployment = Deployment(ctx.logger,
                            properties['azure_config'],
                            properties['name'],
                            timeout=properties.get('timeout'))

    if ctx.node.properties.get('use_external_resource', False):
        ctx.logger.info("Using external resource")
    else:
        # load template
        template = properties.get('template')
        if not template and properties.get('template_file'):
            ctx.logger.info("Using {0} as template".format(
                repr(properties['template_file'])))
            template = ctx.get_resource(properties['template_file'])
            template = json.loads(template)

        if not template:
            raise cfy_exc.NonRecoverableError(
                "A deployment template is not defined.")

        # create deployment
        deployment.create(location=properties['location'])
        ctx.instance.runtime_properties['resource_id'] = properties['name']

        # update deployment
        deployment.update(template=template,
                          params=properties.get('params', {}))

    resource = deployment.get()
    ctx.instance.runtime_properties['outputs'] = resource.properties.outputs
Example #25
    def test_delete_object_call_with_exception(self):
        """Check nsx_common.attempt_with_rerun func: call with exception"""
        self._regen_ctx()
        self.fake_ctx.instance.runtime_properties['resource_id'] = 'r_id'
        kwargs = {
            'a': {'b': 'c'},
            'nsx_auth': {
                'username': '******',
                'password': '******',
                'host': 'host',
                'raml': 'raml'
            }
        }
        fake_client = mock.MagicMock(
            side_effect=cfy_exc.NonRecoverableError()
        )
        with mock.patch(
            'cloudify_nsx.library.nsx_common.NsxClient',
            fake_client
        ):

            with self.assertRaises(cfy_exc.NonRecoverableError):
                common.delete_object(None, 'a', kwargs, ['d', 'm'])

            fake_client.assert_called_with(
                'raml', 'host', 'username', 'password'
            )
            runtime = self.fake_ctx.instance.runtime_properties
            self.assertEqual(runtime['resource_id'], 'r_id')
            self.assertEqual(
                runtime['nsx_auth'], {
                    'username': '******',
                    'password': '******',
                    'host': 'host',
                    'raml': 'raml'
                }
            )
def nat_network_operation(vca_client, gateway, operation, rule_type, public_ip,
                          private_ip, original_port, translated_port,
                          protocol):
    """
        create/drop nat rule for current network
    """
    if operation == CREATE:
        new_original_port = _get_original_port_for_create(
            gateway, rule_type, public_ip, original_port, private_ip,
            translated_port, protocol)
        function = gateway.add_nat_rule
        message = "Add"
    elif operation == DELETE:
        new_original_port = _get_original_port_for_delete(
            public_ip, original_port)
        function = gateway.del_nat_rule
        message = "Remove"
    else:
        raise cfy_exc.NonRecoverableError(
            "Unknown operation: {0}".format(operation))

    info_message = ("{6} NAT rule: rule type '{2}', original_ip '{0}', "
                    "translated_ip '{1}',protocol '{3}', "
                    "original_port '{4}', translated_port '{5}'")
    if rule_type == "SNAT":
        # for SNAT type, ports and protocol must be "any",
        # because they are not configurable
        ctx.logger.info(
            info_message.format(private_ip, public_ip, rule_type, protocol,
                                new_original_port, translated_port, message))
        function(rule_type, private_ip, "any", public_ip, "any", "any")
    elif rule_type == "DNAT":
        ctx.logger.info(
            info_message.format(public_ip, private_ip, rule_type, protocol,
                                new_original_port, translated_port, message))
        function(rule_type, public_ip, str(new_original_port), private_ip,
                 str(translated_port), protocol)
def run_as_workflow(*args, **kwargs):
    # get current context
    ctx = kwargs.get('ctx', CloudifyContext)
    if ctx.type != context.DEPLOYMENT:
        raise cfy_exc.NonRecoverableError(
            "Called with wrong context: {ctx_type}".format(ctx_type=ctx.type))

    # check inputs
    if len(args):
        inputs = args[0]
    else:
        inputs = kwargs.get('inputs', {})

    properties = kwargs.get('properties', {})

    _execute(ctx=ctx,
             properties=properties,
             runtime_properties={'__inputs__': inputs},
             get_resource=workflow_get_resource,
             host_ip=None,
             log_stamp="{execution_id}_{workflow_id}".format(
                 execution_id=inputs.get("execution_id", 'noexecution'),
                 workflow_id=inputs.get("workflow_id", 'noworkflow')),
             kwargs=kwargs)
Example #28
    def handle(self):
        if not self.func:
            raise exceptions.NonRecoverableError(
                'func not found: {0}'.format(self.cloudify_context))
        ctx = self.ctx
        kwargs = self.kwargs
        if ctx.task_target:
            # this operation requires an AMQP client
            amqp_client_utils.init_amqp_client()
        else:
            # task is local (not run through celery), so we need to clone
            # kwargs, and an AMQP client is not required
            kwargs = copy.deepcopy(kwargs)
        if self.cloudify_context.get('has_intrinsic_functions') is True:
            kwargs = ctx._endpoint.evaluate_functions(payload=kwargs)
        if not self.cloudify_context.get('no_ctx_kwarg'):
            kwargs['ctx'] = ctx
        state.current_ctx.set(ctx, kwargs)
        try:
            result = self.func(*self.args, **kwargs)
        except:
            ctx.logger.error(
                'Exception raised on operation [%s] invocation',
                ctx.task_name, exc_info=True)
            raise
        finally:
            amqp_client_utils.close_amqp_client()
            state.current_ctx.clear()
            if ctx.type == context.NODE_INSTANCE:
                ctx.instance.update()
            elif ctx.type == context.RELATIONSHIP_INSTANCE:
                ctx.source.instance.update()
                ctx.target.instance.update()
        if ctx.operation._operation_retry:
            raise ctx.operation._operation_retry
        return result
def _run_one_string(ctx, netconf, rpc_string, xmlns, netconf_namespace,
                    strict_check, deep_error_check):
    ctx.logger.info("Checks: xml validation: {strict_check}, "
                    "rpc_error deep check: {deep_error_check} ".format(
                        strict_check=strict_check,
                        deep_error_check=deep_error_check))
    ctx.logger.debug(
        "Sent: {message}".format(message=filters.shorted_text(rpc_string)))

    # Cisco devices send a newline before the payload, so we need to strip it
    try:
        response = netconf.send(rpc_string).strip()
    except exceptions.NonRecoverableError as e:
        # use str() instead, to fully hide the traceback and the original
        # exception name
        raise cfy_exc.NonRecoverableError(str(e))

    ctx.logger.debug(
        "Recieved: {response}".format(response=filters.shorted_text(response)))

    response_dict = _parse_response(ctx, xmlns, netconf_namespace, response,
                                    strict_check, deep_error_check)
    ctx.logger.debug("Package: {response}".format(
        response=filters.shorted_text(response_dict)))
    return response_dict
Example #30
def _add_custom_repo(repo, distro):

    repo_name = repo['name']

    if 'ubuntu' in distro:

        _add_keyserver(repo['apt']['key_server'])

        repo_entry = repo['apt']['entry']
        temp_file = '{0}.list'.format(repo_name)
        file_path = APT_SOURCELIST_DIR

    elif 'centos' in distro:

        repo_entry = repo['yum']['entry']
        temp_file = '{0}.repo'.format(repo_name)
        file_path = YUM_REPOS_DIR

    else:
        raise exceptions.NonRecoverableError(
            'Only CentOS and Ubuntu supported.')

    ctx.logger.info('Adding new repository source: {0}'.format(repo_name))
    _add_source_list(file_path, repo_entry, temp_file)
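
The branches above imply the following repo dict shape; the values are placeholders, only the keys are taken from the code:

repo = {
    'name': 'example_repo',                                    # placeholder
    'apt': {                                                   # used on Ubuntu
        'key_server': 'keyserver.example.com',
        'entry': 'deb http://repo.example.com/apt stable main',
    },
    'yum': {                                                   # used on CentOS
        'entry': '[example_repo]\nbaseurl=http://repo.example.com/yum\n',
    },
}
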