def _auto_increment_description(count, description):
    """
    Allow the incremental count in the description when defined with the
    string formatting (%) operator. Otherwise, repeat the same description.
    """
    if '%' in description:
        return [description % i for i in xrange(1, count + 1)]
    else:
        return [description] * count
def _auto_increment_hostname(count, hostname):
    """
    Allow a custom incremental count in the hostname when defined with the
    string formatting (%) operator. Otherwise, increment using name-01,
    name-02, name-03, and so forth.
    """
    if '%' not in hostname:
        hostname = "%s-%%01d" % hostname

    return [hostname % i for i in xrange(1, count + 1)]
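
A quick usage sketch of what the two helpers above return when the caller supplies an explicit printf-style placeholder (values are made up; under Python 3 the xrange calls in the helpers would need to be range):

hostnames = _auto_increment_hostname(3, 'node%02d')
# hostnames == ['node01', 'node02', 'node03']

descriptions = _auto_increment_description(2, 'app server %d')
# descriptions == ['app server 1', 'app server 2']

repeated = _auto_increment_description(2, 'db server')
# repeated == ['db server', 'db server']   (no '%', so the text is simply repeated)
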
Example #3
def _auto_increment_description(count, description):
    """
    Allow the incremental count in the description when defined with the
    string formatting (%) operator. Otherwise, repeat the same description.
    """
    if '%' in description:
        return [
            description % i
            for i in xrange(1, count + 1)
        ]
    else:
        return [description] * count
Example #4
def _auto_increment_hostname(count, hostname):
    """
    Allow a custom incremental count in the hostname when defined with the
    string formatting (%) operator. Otherwise, increment using name-01,
    name-02, name-03, and so forth.
    """
    if '%' not in hostname:
        hostname = "%s-%%01d" % hostname

    return [
        hostname % i
        for i in xrange(1, count + 1)
    ]
Example #5
    def generate_sequence(self):
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        numbers = xrange(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                raise AnsibleError("problem formatting %r with %r" %
                                   (i, self.format))
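
A standalone sketch of the same generator logic outside the lookup plugin, to make the inclusive-end adjustment easier to see (the Seq class and its attributes are illustrative stand-ins, and range replaces the Python 2 xrange):

class Seq(object):
    # Hypothetical holder for the attributes the method above reads.
    def __init__(self, start, end, stride, fmt):
        self.start, self.end, self.stride, self.format = start, end, stride, fmt

    def generate_sequence(self):
        # Nudge the end one step in the stride's direction so it is inclusive.
        adjust = 1 if self.stride >= 0 else -1
        for i in range(self.start, self.end + adjust, self.stride):
            yield self.format % i

print(list(Seq(1, 5, 2, 'host%02d').generate_sequence()))
# -> ['host01', 'host03', 'host05']
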
Example #6
    def generate_sequence(self):
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        numbers = xrange(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )
Example #7
def create_virtual_machine(module, profitbricks):
    """
    Create new virtual machine

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object

    Returns:
        True if a new virtual machine was created, false otherwise
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')
    lan = module.params.get('lan')
    wait_timeout = module.params.get('wait_timeout')
    failed = True
    datacenter_found = False

    virtual_machines = []
    virtual_machine_ids = []

    # Locate UUID for datacenter if referenced by name.
    datacenter_list = profitbricks.list_datacenters()
    datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
    if datacenter_id:
        datacenter_found = True

    if not datacenter_found:
        datacenter_response = _create_datacenter(module, profitbricks)
        datacenter_id = datacenter_response['id']

        _wait_for_completion(profitbricks, datacenter_response, wait_timeout,
                             "create_virtual_machine")

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if e.message.startswith('not all'):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message,
                                 exception=traceback.format_exc())

        number_range = xrange(count_offset,
                              count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name]

    # Prefetch a list of servers for later comparison.
    server_list = profitbricks.list_servers(datacenter_id)
    for name in names:
        # Skip server creation if the server already exists.
        if _get_server_id(server_list, name):
            continue

        create_response = _create_machine(module, profitbricks,
                                          str(datacenter_id), name)
        nics = profitbricks.list_nics(datacenter_id, create_response['id'])
        for n in nics['items']:
            if lan == n['properties']['lan']:
                create_response.update(
                    {'public_ip': n['properties']['ips'][0]})

        virtual_machines.append(create_response)

    failed = False

    results = {
        'failed': failed,
        'machines': virtual_machines,
        'action': 'create',
        'instance_ids': {
            'instances': [i['id'] for i in virtual_machines],
        }
    }

    return results
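
The auto_increment branch above probes the name with "name % 0": when the string has no printf-style placeholder, Python raises TypeError("not all arguments converted ..."), and the code appends '%d' so instances can be numbered. A minimal sketch of just that probe (the function name is illustrative, and str(e) stands in for e.message, which only exists on Python 2):

def ensure_placeholder(name):
    # Probe whether the name already carries a printf-style placeholder.
    try:
        name % 0
    except TypeError as e:
        if str(e).startswith('not all'):
            # No placeholder present: append one so instances can be numbered.
            name = '%s%%d' % name
        else:
            raise
    return name

print(ensure_placeholder('web'))       # -> 'web%d'
print(ensure_placeholder('web-%02d'))  # -> 'web-%02d' (left unchanged)
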
Example #8
def create_volume(module, profitbricks):
    """
    Creates a volume.

    This will create a volume in a datacenter.

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        True if the volume was created, false otherwise
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')

    datacenter_found = False
    failed = True
    volumes = []

    # Locate UUID for Datacenter
    if not (uuid_match.match(datacenter)):
        datacenter_list = profitbricks.list_datacenters()
        for d in datacenter_list['items']:
            dc = profitbricks.get_datacenter(d['id'])
            if datacenter == dc['properties']['name']:
                datacenter = d['id']
                datacenter_found = True
                break

    if not datacenter_found:
        module.fail_json(msg='datacenter could not be found.')

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if e.message.startswith('not all'):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message,
                                 exception=traceback.format_exc())

        number_range = xrange(count_offset,
                              count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name] * count

    for name in names:
        create_response = _create_volume(module, profitbricks, str(datacenter),
                                         name)
        volumes.append(create_response)
        _attach_volume(module, profitbricks, datacenter, create_response['id'])
        failed = False

    results = {
        'failed': failed,
        'volumes': volumes,
        'action': 'create',
        'instance_ids': {
            'instances': [i['id'] for i in volumes],
        }
    }

    return results
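
Both functions pick instance numbers the same way: build a candidate range starting at count_offset, drop any numbers already in use, and keep the first count survivors. A small sketch with made-up values (the ProfitBricks variants above always start with an empty numbers set, so the difference step only matters in modules that pre-populate it from existing resources):

count_offset, count = 1, 3
numbers = {2}                        # pretend this number is already taken
number_range = range(count_offset, count_offset + count + len(numbers))
# sorted() only makes the sketch deterministic; the modules use list().
available_numbers = sorted(set(number_range).difference(numbers))
print(available_numbers[:count])     # -> [1, 3, 4]
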
Example #9
def create_virtual_machine(module, client):
    """
    Create new virtual machine

    module : AnsibleModule object
    client: authenticated ionos-cloud object

    Returns:
        True if a new virtual machine was created, false otherwise
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')
    lan = module.params.get('lan')
    wait_timeout = module.params.get('wait_timeout')
    datacenter_found = False

    virtual_machines = []

    datacenter_server = ionoscloud.DataCenterApi(api_client=client)
    server_server = ionoscloud.ServerApi(api_client=client)
    nic_server = ionoscloud.NicApi(api_client=client)

    # Locate UUID for datacenter if referenced by name.
    datacenter_list = datacenter_server.datacenters_get(depth=2)
    datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
    if datacenter_id:
        datacenter_found = True

    if not datacenter_found:
        datacenter_response = _create_datacenter(module, client)
        datacenter_id = datacenter_response.id

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if (hasattr(e, 'message') and e.message.startswith('not all')
                    or to_native(e).startswith('not all')):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e, exception=traceback.format_exc())

        number_range = xrange(count_offset,
                              count_offset + count + len(numbers))

        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name]

    changed = False

    # Prefetch a list of servers for later comparison.
    server_list = server_server.datacenters_servers_get(
        datacenter_id=datacenter_id, depth=3)
    for name in names:
        # Skip server creation if the server already exists.
        server = _get_instance(server_list, name)
        if server is not None:
            virtual_machines.append(server)
            continue

        create_response = _create_machine(module, client, str(datacenter_id),
                                          name)
        changed = True

        virtual_machines.append(create_response)

    return {
        'changed': changed,
        'failed': False,
        'machines': [v.to_dict() for v in virtual_machines],
        'action': 'create'
    }
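
This variant checks the error text in a way that works on both Python 2, where TypeError carries a .message attribute, and Python 3, where it does not; to_native(e) covers the latter. A minimal sketch of an equivalent portable check (the helper name is invented, and str() plays the role of to_native):

def is_missing_placeholder_error(exc):
    # True when a TypeError came from formatting a name with no placeholder.
    return str(exc).startswith('not all')

try:
    'plainname' % 0
except TypeError as e:
    print(is_missing_placeholder_error(e))   # -> True
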
Example #10
    def _execute(self, variables=None):
        '''
        The primary workhorse of the executor system, this runs the task
        on the specified host (which may be the delegated_to host) and handles
        the retry/until and block rescue/always execution
        '''

        if variables is None:
            variables = self._job_vars

        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)

        context_validation_error = None
        try:
            # apply the given task's information to the connection info,
            # which may override some fields already set by the play or
            # the options specified on the command line
            self._play_context = self._play_context.set_task_and_variable_override(task=self._task, variables=variables, templar=templar)

            # fields set from the play/task may be based on variables, so we have to
            # do the same kind of post validation step on it here before we use it.
            self._play_context.post_validate(templar=templar)

            # now that the play context is finalized, if the remote_addr is not set
            # default to using the host's address field as the remote address
            if not self._play_context.remote_addr:
                self._play_context.remote_addr = self._host.address

            # We also add "magic" variables back into the variables dict to make sure
            # a certain subset of variables exist.
            self._play_context.update_vars(variables)

            # FIXME: update connection/shell plugin options
        except AnsibleError as e:
            # save the error, which we'll raise later if we don't end up
            # skipping this task during the conditional evaluation step
            context_validation_error = e

        # Evaluate the conditional (if any) for this task, which we do before running
        # the final task post-validation. We do this before the post validation due to
        # the fact that the conditional may specify that the task be skipped due to a
        # variable not being present which would otherwise cause validation to fail
        try:
            if not self._task.evaluate_conditional(templar, variables):
                display.debug("when evaluation is False, skipping this task")
                return dict(changed=False, skipped=True, skip_reason='Conditional result was False', _ansible_no_log=self._play_context.no_log)
        except AnsibleError as e:
            # loop error takes precedence
            if self._loop_eval_error is not None:
                # Display the error from the conditional as well to prevent
                # losing information useful for debugging.
                display.v(to_text(e))
                raise self._loop_eval_error  # pylint: disable=raising-bad-type
            raise

        # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
        if self._loop_eval_error is not None:
            raise self._loop_eval_error  # pylint: disable=raising-bad-type

        # if we ran into an error while setting up the PlayContext, raise it now
        if context_validation_error is not None:
            raise context_validation_error  # pylint: disable=raising-bad-type

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action in ('include', 'include_tasks'):
            include_args = self._task.args.copy()
            include_file = include_args.pop('_raw_params', None)
            if not include_file:
                return dict(failed=True, msg="No include file was specified to the include")

            include_file = templar.template(include_file)
            return dict(include=include_file, include_args=include_args)

        # if this task is a IncludeRole, we just return now with a success code so the main thread can expand the task list for the given host
        elif self._task.action == 'include_role':
            include_args = self._task.args.copy()
            return dict(include_args=include_args)

        # Now we do final validation on the task, which sets all fields to their final values.
        try:
            self._task.post_validate(templar=templar)
        except AnsibleError:
            raise
        except Exception:
            return dict(changed=False, failed=True, _ansible_no_log=self._play_context.no_log, exception=to_text(traceback.format_exc()))
        if '_variable_params' in self._task.args:
            variable_params = self._task.args.pop('_variable_params')
            if isinstance(variable_params, dict):
                if C.INJECT_FACTS_AS_VARS:
                    display.warning("Using a variable for a task's 'args' is unsafe in some situations "
                                    "(see https://docs.ansible.com/ansible/devel/reference_appendices/faq.html#argsplat-unsafe)")
                variable_params.update(self._task.args)
                self._task.args = variable_params

        # get the connection and the handler for this execution
        if (not self._connection or
                not getattr(self._connection, 'connected', False) or
                self._play_context.remote_addr != self._connection._play_context.remote_addr):
            self._connection = self._get_connection(variables=variables, templar=templar)
        else:
            # if connection is reused, its _play_context is no longer valid and needs
            # to be replaced with the one templated above, in case other data changed
            self._connection._play_context = self._play_context

        if self._task.delegate_to:
            # use vars from delegated host (which already include task vars) instead of original host
            delegated_vars = variables.get('ansible_delegated_vars', {}).get(self._task.delegate_to, {})
            orig_vars = templar.available_variables
            templar.available_variables = delegated_vars
            plugin_vars = self._set_connection_options(delegated_vars, templar)
            templar.available_variables = orig_vars
        else:
            # just use normal host vars
            plugin_vars = self._set_connection_options(variables, templar)

        # get handler
        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # Apply default params for action/module, if present
        self._task.args = get_action_args_with_defaults(
            self._task.action, self._task.args, self._task.module_defaults, templar, self._task._ansible_internal_redirect_list
        )

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = remove_omit(self._task.args, omit_token)

        # Read some values from the task, so that we can modify them if need be
        if self._task.until:
            retries = self._task.retries
            if retries is None:
                retries = 3
            elif retries <= 0:
                retries = 1
            else:
                retries += 1
        else:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1

        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        display.debug("starting attempt loop")
        result = None
        for attempt in xrange(1, retries + 1):
            display.debug("running the handler")
            try:
                if self._task.timeout:
                    old_sig = signal.signal(signal.SIGALRM, task_timeout)
                    signal.alarm(self._task.timeout)
                result = self._handler.run(task_vars=variables)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            except TaskTimeoutError as e:
                msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (self._task.action, self._task.timeout)
                return dict(failed=True, msg=msg)
            finally:
                if self._task.timeout:
                    signal.alarm(0)
                    old_sig = signal.signal(signal.SIGALRM, old_sig)
                self._handler.cleanup()
            display.debug("handler run complete")

            # preserve no log
            result["_ansible_no_log"] = self._play_context.no_log

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                if not isidentifier(self._task.register):
                    raise AnsibleError("Invalid variable name in 'register' specified: '%s'" % self._task.register)

                vars_copy[self._task.register] = result = wrap_var(result)

            if self._task.async_val > 0:
                if self._task.poll > 0 and not result.get('skipped') and not result.get('failed'):
                    result = self._poll_async_result(result=result, templar=templar, task_vars=vars_copy)
                    # FIXME callback 'v2_runner_on_async_poll' here

                # ensure no log is preserved
                result["_ansible_no_log"] = self._play_context.no_log

            # helper methods for use below in evaluating changed/failed_when
            def _evaluate_changed_when_result(result):
                if self._task.changed_when is not None and self._task.changed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.changed_when
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)

            def _evaluate_failed_when_result(result):
                if self._task.failed_when:
                    cond = Conditional(loader=self._loader)
                    cond.when = self._task.failed_when
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                else:
                    failed_when_result = False
                return failed_when_result

            if 'ansible_facts' in result:
                if self._task.action in ('set_fact', 'include_vars'):
                    vars_copy.update(result['ansible_facts'])
                else:
                    # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                    af = wrap_var(result['ansible_facts'])
                    vars_copy.update(namespace_facts(af))
                    if C.INJECT_FACTS_AS_VARS:
                        vars_copy.update(clean_facts(af))

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            if self._task.until:
                result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            # re-update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            # This gives changed/failed_when access to additional recently modified
            # attributes of result
            if self._task.register:
                vars_copy[self._task.register] = result = wrap_var(result)

            # if we didn't skip this task, use the helpers to evaluate the changed/
            # failed_when properties
            if 'skipped' not in result:
                _evaluate_changed_when_result(result)
                _evaluate_failed_when_result(result)

            if retries > 1:
                cond = Conditional(loader=self._loader)
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        display.debug('Retrying task, attempt %d of %d' % (attempt, retries))
                        self._final_q.put(TaskResult(self._host.name, self._task._uuid, result, task_fields=self._task.dump_attrs()), block=False)
                        time.sleep(delay)
                        self._handler = self._get_action_handler(connection=self._connection, templar=templar)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['attempts'] = retries - 1
                result['failed'] = True

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = result = wrap_var(result)

        if 'ansible_facts' in result:
            if self._task.action in ('set_fact', 'include_vars'):
                variables.update(result['ansible_facts'])
            else:
                # TODO: cleaning of facts should eventually become part of taskresults instead of vars
                af = wrap_var(result['ansible_facts'])
                variables.update(namespace_facts(af))
                if C.INJECT_FACTS_AS_VARS:
                    variables.update(clean_facts(af))

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, ie. "notify: service {{item}}"
        if self._task.notify is not None:
            result['_ansible_notify'] = self._task.notify

        # add the delegated vars to the result, so we can reference them
        # on the results side without having to do any further templating
        if self._task.delegate_to:
            result["_ansible_delegated_vars"] = {'ansible_delegated_host': self._task.delegate_to}
            for k in plugin_vars:
                result["_ansible_delegated_vars"][k] = delegated_vars.get(k)
        # and return
        display.debug("attempt loop complete, returning result")
        return result
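
The attempt loop above is a for/else: the body breaks as soon as the until condition holds, and the else branch, reached only when every attempt ran without a break, marks the result failed. A minimal standalone sketch of that control flow (run_once, until, retries and delay are illustrative stand-ins for the handler run, the until conditional and the task fields):

import time

def run_with_retries(run_once, until, retries=3, delay=1):
    # Mirror the for/else retry shape: break on success, mark failure in else.
    result = None
    for attempt in range(1, retries + 1):
        result = run_once()
        result['attempts'] = attempt
        if until(result):
            break
        if attempt < retries:
            time.sleep(delay)
    else:
        result['failed'] = True
    return result

# Example: a call that only succeeds on its third attempt.
calls = {'n': 0}
def flaky():
    calls['n'] += 1
    return {'rc': 0 if calls['n'] >= 3 else 1}

print(run_with_retries(flaky, until=lambda r: r['rc'] == 0, delay=0))
# -> {'rc': 0, 'attempts': 3}
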
Example #11
def create_virtual_machine(module, profitbricks):
    """
    Create new virtual machine

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object

    Returns:
        True if a new virtual machine was created, false otherwise
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')
    lan = module.params.get('lan')
    wait_timeout = module.params.get('wait_timeout')
    failed = True
    datacenter_found = False

    virtual_machines = []
    virtual_machine_ids = []

    # Locate UUID for datacenter if referenced by name.
    datacenter_list = profitbricks.list_datacenters()
    datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
    if datacenter_id:
        datacenter_found = True

    if not datacenter_found:
        datacenter_response = _create_datacenter(module, profitbricks)
        datacenter_id = datacenter_response['id']

        _wait_for_completion(profitbricks, datacenter_response,
                             wait_timeout, "create_virtual_machine")

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if e.message.startswith('not all'):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message, exception=traceback.format_exc())

        number_range = xrange(count_offset, count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name]

    # Prefetch a list of servers for later comparison.
    server_list = profitbricks.list_servers(datacenter_id)
    for name in names:
        # Skip server creation if the server already exists.
        if _get_server_id(server_list, name):
            continue

        create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
        nics = profitbricks.list_nics(datacenter_id, create_response['id'])
        for n in nics['items']:
            if lan == n['properties']['lan']:
                create_response.update({'public_ip': n['properties']['ips'][0]})

        virtual_machines.append(create_response)

    failed = False

    results = {
        'failed': failed,
        'machines': virtual_machines,
        'action': 'create',
        'instance_ids': {
            'instances': [i['id'] for i in virtual_machines],
        }
    }

    return results
def create_volume(module, profitbricks):
    """
    Create volumes.

    This will create one or more volumes in a datacenter.

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        dict of created volumes
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')

    datacenter_found = False
    volumes = []

    datacenter_list = profitbricks.list_datacenters()
    for d in datacenter_list['items']:
        dc = profitbricks.get_datacenter(d['id'])
        if datacenter in [dc['properties']['name'], dc['id']]:
            datacenter = d['id']
            datacenter_found = True
            break

    if not datacenter_found:
        module.fail_json(msg='datacenter could not be found.')

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if (hasattr(e, 'message') and e.message.startswith('not all') or to_native(e).startswith('not all')):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message, exception=traceback.format_exc())

        number_range = xrange(count_offset, count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name] * count

    changed = False

    # Prefetch a list of volumes for later comparison.
    volume_list = profitbricks.list_volumes(datacenter)

    for name in names:
        # Skip volume creation if a volume with the same name already exists.
        if _get_instance_id(volume_list, name):
            continue

        create_response = _create_volume(module, profitbricks, str(datacenter), name)
        volumes.append(create_response)
        _attach_volume(module, profitbricks, datacenter, create_response['id'])
        changed = True

    results = {
        'changed': changed,
        'failed': False,
        'volumes': volumes,
        'action': 'create',
        'instance_ids': {
            'instances': [i['id'] for i in volumes],
        }
    }

    return results
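
Both creation loops stay idempotent by checking a prefetched listing for the name before creating anything. The _get_instance_id helper is not shown above; a plausible sketch of its shape, assuming the dict-style ProfitBricks responses used in these examples:

def _get_instance_id(resource_list, name):
    # Sketch only: return the id of the first item whose name matches, else None.
    for item in resource_list.get('items', []):
        if item.get('properties', {}).get('name') == name:
            return item['id']
    return None

volume_list = {'items': [{'id': 'abc-123', 'properties': {'name': 'data-1'}}]}
print(_get_instance_id(volume_list, 'data-1'))   # -> 'abc-123'
print(_get_instance_id(volume_list, 'data-2'))   # -> None
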
Example #13
    def retry(self, action, vars, retries, delay, until):
        if retries is None:
            retries = 3
        elif retries <= 0:
            retries = 1

        if delay < 0:
            delay = 1

        self._display.debug("starting attempt loop")
        result = None
        for attempt in xrange(1, retries + 1):
            self._display.debug("running the action")
            try:
                result = action.run(task_vars=vars)
            except AnsibleActionSkip as e:
                return dict(skipped=True, msg=to_text(e))
            except AnsibleActionFail as e:
                return dict(failed=True, msg=to_text(e))
            except AnsibleConnectionFailure as e:
                return dict(unreachable=True, msg=to_text(e))
            except TaskTimeoutError as e:
                msg = 'The %s action failed to execute in the expected time frame (%d) and was terminated' % (
                    self._task.action, self._task.timeout)
                return dict(failed=True, msg=msg)
            self._display.debug("action run complete")

            # set the failed property if it was missing.
            if 'failed' not in result:
                # rc is here for backwards compatibility and modules that use it instead of 'failed'
                if 'rc' in result and result['rc'] not in [0, "0"]:
                    result['failed'] = True
                else:
                    result['failed'] = False

            # Make attempts and retries available early to allow their use in changed/failed_when
            result['attempts'] = attempt

            # set the changed property if it was missing.
            if 'changed' not in result:
                result['changed'] = False

            if retries > 1:
                if until(result):
                    break
                else:
                    # no conditional check, or it failed, so sleep for the specified time
                    if attempt < retries:
                        result['_ansible_retry'] = True
                        result['retries'] = retries
                        self._display.debug('Retrying task, attempt %d of %d' %
                                            (attempt, retries))
                        self.v2_runner_retry(result)
                        time.sleep(delay)
        else:
            if retries > 1:
                # we ran out of attempts, so mark the result as failed
                result['failed'] = True
                result['attempts'] = retries - 1

        if not is_failed(result) and not until(result):
            result['failed'] = True
            result['msg'] = "Ran out of attempts."

        if not is_failed(result) and not is_finished(result):
            result['failed'] = True
            result['msg'] = "Job did not finish."

        return result
Example #14
File: rax.py  Project: ernstp/ansible
def cloudservers(module, state=None, name=None, flavor=None, image=None,
                 meta=None, key_name=None, files=None, wait=True, wait_timeout=300,
                 disk_config=None, count=1, group=None, instance_ids=None,
                 exact_count=False, networks=None, count_offset=0,
                 auto_increment=False, extra_create_args=None, user_data=None,
                 config_drive=False, boot_from_volume=False,
                 boot_volume=None, boot_volume_size=None,
                 boot_volume_terminate=False):
    meta = {} if meta is None else meta
    files = {} if files is None else files
    instance_ids = [] if instance_ids is None else instance_ids
    networks = [] if networks is None else networks
    extra_create_args = {} if extra_create_args is None else extra_create_args

    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present' or (state == 'absent' and instance_ids is None):
        if not boot_from_volume and not boot_volume and not image:
            module.fail_json(msg='image is required for the "rax" module')

        for arg, value in dict(name=name, flavor=flavor).items():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                     arg)

        if boot_from_volume and not image and not boot_volume:
            module.fail_json(msg='image or boot_volume are required for the '
                                 '"rax" with boot_from_volume')

        if boot_from_volume and image and not boot_volume_size:
            module.fail_json(msg='boot_volume_size is required for the "rax" '
                                 'module with boot_from_volume and image')

        if boot_from_volume and image and boot_volume:
            image = None

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, string_types):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # Idempotently ensure a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                     '"exact_count"')

            if auto_increment:
                numbers = set()

                # See if the name is a printf like string, if not append
                # %d to the end
                try:
                    name % 0
                except TypeError as e:
                    if e.message.startswith('not all'):
                        name = '%s%%d' % name
                    else:
                        module.fail_json(msg=e.message)

                # regex pattern to match printf formatting
                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                    match = re.search(pattern, server.name)
                    if match:
                        number = int(match.group(1))
                        numbers.add(number)

                number_range = xrange(count_offset, count_offset + count)
                available_numbers = list(set(number_range)
                                         .difference(numbers))
            else:  # Not auto incrementing
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                # available_numbers not needed here, we inspect auto_increment
                # again later

            # If state was absent but the count was changed,
            # assume we only wanted to remove that number of instances
            if was_absent:
                diff = len(servers) - count
                if diff < 0:
                    count = 0
                else:
                    count = diff

            if len(servers) > count:
                # We have more servers than we need, set state='absent'
                # and delete the extras, this should delete the oldest
                state = 'absent'
                kept = servers[:count]
                del servers[:count]
                instance_ids = []
                for server in servers:
                    instance_ids.append(server.id)
                delete(module, instance_ids=instance_ids, wait=wait,
                       wait_timeout=wait_timeout, kept=kept)
            elif len(servers) < count:
                # we have fewer servers than we need
                if auto_increment:
                    # auto incrementing server numbers
                    names = []
                    name_slice = count - len(servers)
                    numbers_to_use = available_numbers[:name_slice]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # We are not auto incrementing server numbers,
                    # create a list of 'name' that matches how many we need
                    names = [name] * (count - len(servers))
            else:
                # we have the right number of servers, just return info
                # about all of the matched servers
                instances = []
                instance_ids = []
                for server in servers:
                    instances.append(rax_to_dict(server, 'server'))
                    instance_ids.append(server.id)
                module.exit_json(changed=False, action=None,
                                 instances=instances,
                                 success=[], error=[], timeout=[],
                                 instance_ids={'instances': instance_ids,
                                               'success': [], 'error': [],
                                               'timeout': []})
        else:  # not called with exact_count=True
            if group is not None:
                if auto_increment:
                    # we are auto incrementing server numbers, but not with
                    # exact_count
                    numbers = set()

                    # See if the name is a printf like string, if not append
                    # %d to the end
                    try:
                        name % 0
                    except TypeError as e:
                        if e.message.startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg=e.message)

                    # regex pattern to match printf formatting
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        # Ignore DELETED servers
                        if server.status == 'DELETED':
                            continue
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(set(number_range)
                                             .difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # Not auto incrementing
                    names = [name] * count
            else:
                # No group was specified, and not using exact_count
                # Perform more simplistic matching
                search_opts = {
                    'name': '^%s$' % name,
                    'flavor': flavor
                }
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue

                    if not rax_find_server_image(module, server, image,
                                                 boot_volume):
                        continue

                    # Ignore servers with non matching metadata
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    # We have more servers than were requested, don't do
                    # anything. Not running with exact_count=True, so we assume
                    # more is OK
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False, action=None,
                                     instances=instances, success=[], error=[],
                                     timeout=[],
                                     instance_ids={'instances': instance_ids,
                                                   'success': [], 'error': [],
                                                   'timeout': []})

                # We need more servers to reach our target, create names for
                # them, we aren't performing auto_increment here
                names = [name] * (count - len(servers))

        block_device_mapping_v2 = []
        if boot_from_volume:
            mapping = {
                'boot_index': '0',
                'delete_on_termination': boot_volume_terminate,
                'destination_type': 'volume',
            }
            if image:
                mapping.update({
                    'uuid': image,
                    'source_type': 'image',
                    'volume_size': boot_volume_size,
                })
                image = None
            elif boot_volume:
                volume = rax_find_volume(module, pyrax, boot_volume)
                mapping.update({
                    'uuid': pyrax.utils.get_id(volume),
                    'source_type': 'volume',
                })
            block_device_mapping_v2.append(mapping)

        create(module, names=names, flavor=flavor, image=image,
               meta=meta, key_name=key_name, files=files, wait=wait,
               wait_timeout=wait_timeout, disk_config=disk_config, group=group,
               nics=nics, extra_create_args=extra_create_args,
               user_data=user_data, config_drive=config_drive,
               existing=servers,
               block_device_mapping_v2=block_device_mapping_v2)

    elif state == 'absent':
        if instance_ids is None:
            # We weren't given an explicit list of server IDs to delete
            # Let's match instead
            search_opts = {
                'name': '^%s$' % name,
                'flavor': flavor
            }
            for server in cs.servers.list(search_opts=search_opts):
                # Ignore DELETED servers
                if server.status == 'DELETED':
                    continue

                if not rax_find_server_image(module, server, image,
                                             boot_volume):
                    continue

                # Ignore servers with non matching metadata
                if meta != server.metadata:
                    continue

                servers.append(server)

            # Build a list of server IDs to delete
            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # No server IDs were matched for deletion, or no IDs were
            # explicitly provided, just exit and don't do anything
            module.exit_json(changed=False, action=None, instances=[],
                             success=[], error=[], timeout=[],
                             instance_ids={'instances': [],
                                           'success': [], 'error': [],
                                           'timeout': []})

        delete(module, instance_ids=instance_ids, wait=wait,
               wait_timeout=wait_timeout)
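
To find which numbers are already taken, the rax code turns the printf-style name into a regular expression and harvests the digits from existing server names. A small sketch of that conversion with made-up server names:

import re

name = 'web-%03d'
# Replace the printf placeholder with a capturing group, as the module does.
pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)    # pattern == r'web-(\d+)'
numbers = set()
for server_name in ['web-001', 'web-003', 'db-001']:
    match = re.search(pattern, server_name)
    if match:
        numbers.add(int(match.group(1)))
print(sorted(numbers))                           # -> [1, 3]
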
Example #15
def create_volume(module, client):
    """
    Create volumes.

    This will create one or more volumes in a datacenter.

    module : AnsibleModule object
    client: authenticated ionoscloud object.

    Returns:
        dict of created volumes
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')

    volume_server = ionoscloud.VolumeApi(client)
    datacenter_server = ionoscloud.DataCenterApi(client)
    servers_server = ionoscloud.ServerApi(client)

    datacenter_found = False
    volumes = []
    instance_ids = []

    datacenter_list = datacenter_server.datacenters_get(depth=2)
    for d in datacenter_list.items:
        dc = datacenter_server.datacenters_find_by_id(d.id)
        if datacenter in [dc.properties.name, dc.id]:
            datacenter = d.id
            datacenter_found = True
            break

    if not datacenter_found:
        module.fail_json(msg='datacenter could not be found.')

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if (hasattr(e, 'message') and e.message.startswith('not all') or to_native(e).startswith('not all')):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message, exception=traceback.format_exc())

        number_range = xrange(count_offset, count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)

    else:
        names = [name] * count

    changed = False

    # Prefetch a list of volumes for later comparison.
    volume_list = volume_server.datacenters_volumes_get(datacenter, depth=2)

    for name in names:
        # Skip volume creation if a volume with the same name already exists.
        if _get_instance_id(volume_list, name):
            volumes.append(_get_resource(volume_list, name))
            continue

        create_response = _create_volume(module, volume_server, str(datacenter), name, client)
        volumes.append(create_response)
        instance_ids.append(create_response.id)
        _attach_volume(module, servers_server, datacenter, create_response.id)
        changed = True

    results = {
        'changed': changed,
        'failed': False,
        'volumes': [v.to_dict() for v in volumes],
        'action': 'create',
        'instance_ids': instance_ids
    }

    return results
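
Unlike the dict-based ProfitBricks examples, this variant receives SDK model objects, so it serializes them with to_dict() before returning and reports instance_ids as a flat list rather than a nested mapping. An illustrative sketch of the returned payload (all values are invented, not real API output):

results = {
    'changed': True,
    'failed': False,
    'volumes': [{'id': 'vol-1', 'properties': {'name': 'data1', 'size': 10}}],
    'action': 'create',
    'instance_ids': ['vol-1'],
}
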
Example #16
def cloudservers(module,
                 state=None,
                 name=None,
                 flavor=None,
                 image=None,
                 meta=None,
                 key_name=None,
                 files=None,
                 wait=True,
                 wait_timeout=300,
                 disk_config=None,
                 count=1,
                 group=None,
                 instance_ids=None,
                 exact_count=False,
                 networks=None,
                 count_offset=0,
                 auto_increment=False,
                 extra_create_args=None,
                 user_data=None,
                 config_drive=False,
                 boot_from_volume=False,
                 boot_volume=None,
                 boot_volume_size=None,
                 boot_volume_terminate=False):
    meta = {} if meta is None else meta
    files = {} if files is None else files
    instance_ids = [] if instance_ids is None else instance_ids
    networks = [] if networks is None else networks
    extra_create_args = {} if extra_create_args is None else extra_create_args

    cs = pyrax.cloudservers
    cnw = pyrax.cloud_networks
    if not cnw:
        module.fail_json(msg='Failed to instantiate client. This '
                         'typically indicates an invalid region or an '
                         'incorrectly capitalized region name.')

    if state == 'present' or (state == 'absent' and instance_ids is None):
        if not boot_from_volume and not boot_volume and not image:
            module.fail_json(msg='image is required for the "rax" module')

        for arg, value in dict(name=name, flavor=flavor).items():
            if not value:
                module.fail_json(msg='%s is required for the "rax" module' %
                                 arg)

        if boot_from_volume and not image and not boot_volume:
            module.fail_json(msg='image or boot_volume are required for the '
                             '"rax" with boot_from_volume')

        if boot_from_volume and image and not boot_volume_size:
            module.fail_json(msg='boot_volume_size is required for the "rax" '
                             'module with boot_from_volume and image')

        if boot_from_volume and image and boot_volume:
            image = None

    servers = []

    # Add the group meta key
    if group and 'group' not in meta:
        meta['group'] = group
    elif 'group' in meta and group is None:
        group = meta['group']

    # Normalize and ensure all metadata values are strings
    for k, v in meta.items():
        if isinstance(v, list):
            meta[k] = ','.join(['%s' % i for i in v])
        elif isinstance(v, dict):
            meta[k] = json.dumps(v)
        elif not isinstance(v, string_types):
            meta[k] = '%s' % v

    # When using state=absent with group, the absent block won't match the
    # names properly. Use the exact_count functionality to decrease the count
    # to the desired level
    was_absent = False
    if group is not None and state == 'absent':
        exact_count = True
        state = 'present'
        was_absent = True

    if image:
        image = rax_find_image(module, pyrax, image)

    nics = []
    if networks:
        for network in networks:
            nics.extend(rax_find_network(module, pyrax, network))

    # act on the state
    if state == 'present':
        # Idempotently ensure a specific count of servers
        if exact_count is not False:
            # See if we can find servers that match our options
            if group is None:
                module.fail_json(msg='"group" must be provided when using '
                                 '"exact_count"')

            if auto_increment:
                numbers = set()

                # See if the name is a printf like string, if not append
                # %d to the end
                try:
                    name % 0
                except TypeError as e:
                    if e.message.startswith('not all'):
                        name = '%s%%d' % name
                    else:
                        module.fail_json(msg=e.message)

                # regex pattern to match printf formatting
                pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                    match = re.search(pattern, server.name)
                    if match:
                        number = int(match.group(1))
                        numbers.add(number)

                number_range = xrange(count_offset, count_offset + count)
                available_numbers = list(set(number_range).difference(numbers))
            else:  # Not auto incrementing
                for server in cs.servers.list():
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue
                    if server.metadata.get('group') == group:
                        servers.append(server)
                # available_numbers not needed here, we inspect auto_increment
                # again later

            # If state was absent but the count was changed,
            # assume we only wanted to remove that number of instances
            if was_absent:
                diff = len(servers) - count
                if diff < 0:
                    count = 0
                else:
                    count = diff

            if len(servers) > count:
                # We have more servers than we need, set state='absent'
                # and delete the extras, this should delete the oldest
                state = 'absent'
                kept = servers[:count]
                del servers[:count]
                instance_ids = []
                for server in servers:
                    instance_ids.append(server.id)
                delete(module,
                       instance_ids=instance_ids,
                       wait=wait,
                       wait_timeout=wait_timeout,
                       kept=kept)
            elif len(servers) < count:
                # we have fewer servers than we need
                if auto_increment:
                    # auto incrementing server numbers
                    names = []
                    name_slice = count - len(servers)
                    numbers_to_use = available_numbers[:name_slice]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # We are not auto incrementing server numbers, so
                    # create a list repeating 'name' as many times as we need
                    names = [name] * (count - len(servers))
            else:
                # We have the right number of servers; just return info
                # about all of the matched servers
                instances = []
                instance_ids = []
                for server in servers:
                    instances.append(rax_to_dict(server, 'server'))
                    instance_ids.append(server.id)
                module.exit_json(changed=False,
                                 action=None,
                                 instances=instances,
                                 success=[],
                                 error=[],
                                 timeout=[],
                                 instance_ids={
                                     'instances': instance_ids,
                                     'success': [],
                                     'error': [],
                                     'timeout': []
                                 })
        else:  # not called with exact_count=True
            if group is not None:
                if auto_increment:
                    # we are auto incrementing server numbers, but not with
                    # exact_count
                    numbers = set()

                    # See if the name is a printf-like format string; if not,
                    # append %d to the end
                    try:
                        name % 0
                    except TypeError as e:
                        if e.message.startswith('not all'):
                            name = '%s%%d' % name
                        else:
                            module.fail_json(msg=e.message)

                    # regex pattern to match printf formatting
                    pattern = re.sub(r'%\d*[sd]', r'(\d+)', name)
                    for server in cs.servers.list():
                        # Ignore DELETED servers
                        if server.status == 'DELETED':
                            continue
                        if server.metadata.get('group') == group:
                            servers.append(server)
                        match = re.search(pattern, server.name)
                        if match:
                            number = int(match.group(1))
                            numbers.add(number)

                    number_range = xrange(count_offset,
                                          count_offset + count + len(numbers))
                    available_numbers = list(
                        set(number_range).difference(numbers))
                    names = []
                    numbers_to_use = available_numbers[:count]
                    for number in numbers_to_use:
                        names.append(name % number)
                else:
                    # Not auto incrementing
                    names = [name] * count
            else:
                # No group was specified and exact_count is not in use,
                # so perform simpler name/flavor matching
                search_opts = {'name': '^%s$' % name, 'flavor': flavor}
                servers = []
                for server in cs.servers.list(search_opts=search_opts):
                    # Ignore DELETED servers
                    if server.status == 'DELETED':
                        continue

                    if not rax_find_server_image(module, server, image,
                                                 boot_volume):
                        continue

                    # Ignore servers with non-matching metadata
                    if server.metadata != meta:
                        continue
                    servers.append(server)

                if len(servers) >= count:
                    # We have at least as many servers as were requested, so
                    # don't do anything. We are not running with
                    # exact_count=True, so we assume more is OK
                    instances = []
                    for server in servers:
                        instances.append(rax_to_dict(server, 'server'))

                    instance_ids = [i['id'] for i in instances]
                    module.exit_json(changed=False,
                                     action=None,
                                     instances=instances,
                                     success=[],
                                     error=[],
                                     timeout=[],
                                     instance_ids={
                                         'instances': instance_ids,
                                         'success': [],
                                         'error': [],
                                         'timeout': []
                                     })

                # We need more servers to reach our target, so create names
                # for them; we aren't performing auto_increment here
                names = [name] * (count - len(servers))

        block_device_mapping_v2 = []
        if boot_from_volume:
            mapping = {
                'boot_index': '0',
                'delete_on_termination': boot_volume_terminate,
                'destination_type': 'volume',
            }
            if image:
                mapping.update({
                    'uuid': image,
                    'source_type': 'image',
                    'volume_size': boot_volume_size,
                })
                image = None
            elif boot_volume:
                volume = rax_find_volume(module, pyrax, boot_volume)
                mapping.update({
                    'uuid': pyrax.utils.get_id(volume),
                    'source_type': 'volume',
                })
            block_device_mapping_v2.append(mapping)

        create(module,
               names=names,
               flavor=flavor,
               image=image,
               meta=meta,
               key_name=key_name,
               files=files,
               wait=wait,
               wait_timeout=wait_timeout,
               disk_config=disk_config,
               group=group,
               nics=nics,
               extra_create_args=extra_create_args,
               user_data=user_data,
               config_drive=config_drive,
               existing=servers,
               block_device_mapping_v2=block_device_mapping_v2)

    elif state == 'absent':
        if instance_ids is None:
            # We weren't given an explicit list of server IDs to delete
            # Let's match instead
            search_opts = {'name': '^%s$' % name, 'flavor': flavor}
            for server in cs.servers.list(search_opts=search_opts):
                # Ignore DELETED servers
                if server.status == 'DELETED':
                    continue

                if not rax_find_server_image(module, server, image,
                                             boot_volume):
                    continue

                # Ignore servers with non-matching metadata
                if meta != server.metadata:
                    continue

                servers.append(server)

            # Build a list of server IDs to delete
            instance_ids = []
            for server in servers:
                if len(instance_ids) < count:
                    instance_ids.append(server.id)
                else:
                    break

        if not instance_ids:
            # No server IDs were matched for deletion, or none were
            # explicitly provided; just exit and don't do anything
            module.exit_json(changed=False,
                             action=None,
                             instances=[],
                             success=[],
                             error=[],
                             timeout=[],
                             instance_ids={
                                 'instances': [],
                                 'success': [],
                                 'error': [],
                                 'timeout': []
                             })

        delete(module,
               instance_ids=instance_ids,
               wait=wait,
               wait_timeout=wait_timeout)
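
The exact_count handling above boils down to a small naming routine: probe whether the base name is a printf-style format, collect the numbers already taken by matching servers, and hand out the lowest free numbers. A minimal, self-contained sketch of that routine follows; the helper name, the plain-list input, and the sample values are illustrative assumptions, not part of the module.

import re


def pick_new_names(name, existing_names, count, count_offset=1):
    """Illustrative sketch of the auto_increment naming logic above."""
    # If the base name is not a printf-style format, append %d.
    try:
        name % 0
    except TypeError as exc:
        if str(exc).startswith('not all'):
            name = '%s%%d' % name
        else:
            raise

    # Turn the format into a regex and collect the numbers already in use.
    # The doubled backslash keeps the replacement a literal '\d+'.
    pattern = re.sub(r'%\d*[sd]', r'(\\d+)', name)
    taken = set()
    for existing in existing_names:
        match = re.search(pattern, existing)
        if match:
            taken.add(int(match.group(1)))

    # Hand out the lowest free numbers from the candidate range.
    candidates = range(count_offset, count_offset + count + len(taken))
    available = sorted(set(candidates).difference(taken))
    return [name % number for number in available[:count]]


# pick_new_names('web-%03d', ['web-001', 'web-003'], 2)
# -> ['web-002', 'web-004']
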
Example #17
    def exec_command(self, cmd, in_data=None, sudoable=True):
        ''' run a command on the SSM host '''

        super(Connection, self).exec_command(cmd,
                                             in_data=in_data,
                                             sudoable=sudoable)

        display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self.host)

        session = self._session

        mark_begin = "".join([
            random.choice(string.ascii_letters)
            for i in xrange(self.MARK_LENGTH)
        ])
        if self.is_windows:
            mark_start = mark_begin + " $LASTEXITCODE"
        else:
            mark_start = mark_begin
        mark_end = "".join([
            random.choice(string.ascii_letters)
            for i in xrange(self.MARK_LENGTH)
        ])

        # Wrap the command in markers appropriate for the shell being used
        cmd = self._wrap_command(cmd, sudoable, mark_start, mark_end)

        self._flush_stderr(session)

        for chunk in chunks(cmd, 1024):
            session.stdin.write(to_bytes(chunk, errors='surrogate_or_strict'))

        # Read stdout between the markers
        stdout = ''
        win_line = ''
        begin = False
        stop_time = int(round(time.time())) + self.get_option('ssm_timeout')
        while session.poll() is None:
            remaining = stop_time - int(round(time.time()))
            if remaining < 1:
                self._timeout = True
                display.vvvv(u"EXEC timeout stdout: {0}".format(
                    to_text(stdout)),
                             host=self.host)
                raise AnsibleConnectionFailure(
                    "SSM exec_command timeout on host: %s" % self.instance_id)
            if self._poll_stdout.poll(1000):
                line = self._filter_ansi(self._stdout.readline())
                display.vvvv(u"EXEC stdout line: {0}".format(to_text(line)),
                             host=self.host)
            else:
                display.vvvv(u"EXEC remaining: {0}".format(remaining),
                             host=self.host)
                continue

            if not begin and self.is_windows:
                win_line = win_line + line
                line = win_line

            if mark_start in line:
                begin = True
                if not line.startswith(mark_start):
                    stdout = ''
                continue
            if begin:
                if mark_end in line:
                    display.vvvv(u"POST_PROCESS: {0}".format(to_text(stdout)),
                                 host=self.host)
                    returncode, stdout = self._post_process(stdout, mark_begin)
                    break
                else:
                    stdout = stdout + line

        stderr = self._flush_stderr(session)

        return (returncode, stdout, stderr)
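
The exec_command above relies on a framing trick: the remote command is bracketed by two random marker strings, and only the output that appears between them is treated as the command's stdout. A minimal sketch of that idea on a plain string follows; the helper names, the MARK_LENGTH value, and the simple echo-based wrapper are illustrative assumptions, while the real plugin streams lines from the live SSM session and post-processes the exit code.

import random
import string

MARK_LENGTH = 26  # assumed marker length for the sketch


def make_marker(length=MARK_LENGTH):
    # A random ASCII marker is unlikely to collide with normal command output.
    return ''.join(random.choice(string.ascii_letters) for _ in range(length))


def wrap_command(cmd, mark_start, mark_end):
    # POSIX-shell style framing: print the start marker, run the command,
    # then print the end marker (the Windows path would also echo
    # $LASTEXITCODE so the exit code can be recovered later).
    return 'echo %s\n%s\necho %s\n' % (mark_start, cmd, mark_end)


def extract_between(output, mark_start, mark_end):
    # Keep only the lines strictly between the two markers.
    kept, begin = [], False
    for line in output.splitlines():
        if not begin and mark_start in line:
            begin = True
            continue
        if begin and mark_end in line:
            break
        if begin:
            kept.append(line)
    return '\n'.join(kept)
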
Example #18
def create_volume(module, profitbricks):
    """
    Creates a volume.

    This will create a volume in a datacenter.

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        a dict describing the created volumes and their instance IDs
    """
    datacenter = module.params.get('datacenter')
    name = module.params.get('name')
    auto_increment = module.params.get('auto_increment')
    count = module.params.get('count')

    datacenter_found = False
    failed = True
    volumes = []

    # Locate UUID for Datacenter
    if not uuid_match.match(datacenter):
        datacenter_list = profitbricks.list_datacenters()
        for d in datacenter_list['items']:
            dc = profitbricks.get_datacenter(d['id'])
            if datacenter == dc['properties']['name']:
                datacenter = d['id']
                datacenter_found = True
                break

    if not datacenter_found:
        module.fail_json(msg='datacenter could not be found.')

    if auto_increment:
        numbers = set()
        count_offset = 1

        try:
            name % 0
        except TypeError as e:
            if e.message.startswith('not all'):
                name = '%s%%d' % name
            else:
                module.fail_json(msg=e.message, exception=traceback.format_exc())

        number_range = xrange(count_offset, count_offset + count + len(numbers))
        available_numbers = list(set(number_range).difference(numbers))
        names = []
        numbers_to_use = available_numbers[:count]
        for number in numbers_to_use:
            names.append(name % number)
    else:
        names = [name] * count
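    # e.g. (illustrative): name='data-%02d', count=3 gives
    # ['data-01', 'data-02', 'data-03'] via the auto_increment branch,
    # or ['data', 'data', 'data'] otherwise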

    for name in names:
        create_response = _create_volume(module, profitbricks, str(datacenter), name)
        volumes.append(create_response)
        _attach_volume(module, profitbricks, datacenter, create_response['id'])
        failed = False

    results = {
        'failed': failed,
        'volumes': volumes,
        'action': 'create',
        'instance_ids': {
            'instances': [i['id'] for i in volumes],
        }
    }

    return results
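
The datacenter lookup at the top of create_volume accepts either a UUID or a human-readable name. A minimal sketch of that name-or-UUID resolution follows; the UUID regex and the helper name are assumptions standing in for the module's uuid_match, and client is any object exposing the same list_datacenters()/get_datacenter() calls used above.

import re

# Assumed stand-in for the module-level uuid_match pattern.
UUID_RE = re.compile(
    r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$',
    re.IGNORECASE)


def resolve_datacenter_id(client, datacenter):
    """Return the datacenter UUID, resolving by name when needed."""
    if UUID_RE.match(datacenter):
        return datacenter
    for item in client.list_datacenters()['items']:
        detail = client.get_datacenter(item['id'])
        if detail['properties']['name'] == datacenter:
            return item['id']
    return None  # caller decides how to fail (e.g. module.fail_json)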