示例#1
0
def shingle(tokens, window=4):
    '''A generator for a moving window of the provided tokens.

    Yields each run of ``window`` consecutive tokens as a new list,
    advancing one token at a time.  Yields nothing when there are fewer
    than ``window`` tokens.  Works for one-shot iterators as well as
    sequences (the original built ``window`` parallel iterators, which
    only behaved correctly for re-iterable sequences, and terminated by
    leaking StopIteration out of the generator -- a RuntimeError under
    PEP 479 on Python 3.7+).

    Args:
        tokens: An iterable of tokens.
        window: Positive size of the moving window.

    Raises:
        ValueError: If ``window`` is not positive (raised on first
            iteration, since this is a generator).
    '''
    if window <= 0:
        raise ValueError('Window size must be positive')
    buf = []
    for token in tokens:
        buf.append(token)
        if len(buf) == window:
            # Yield a copy so callers may mutate the result safely.
            yield list(buf)
            # Slide the window forward by one token.
            buf.pop(0)
示例#2
0
def shingle(tokens, window=4):
    '''A generator for a moving window of the provided tokens.

    Yields each run of ``window`` consecutive tokens as a new list,
    sliding forward one token at a time; nothing is yielded when fewer
    than ``window`` tokens are available.  The previous implementation
    ended by letting ``next()`` raise StopIteration inside the generator,
    which PEP 479 (Python 3.7+) converts into RuntimeError, and it
    misbehaved on one-shot iterators because every window position shared
    the same underlying iterator.

    Args:
        tokens: An iterable of tokens.
        window: Positive size of the moving window.

    Raises:
        ValueError: If ``window`` is not positive (raised on first
            iteration, since this is a generator).
    '''
    from collections import deque
    from itertools import islice

    if window <= 0:
        raise ValueError('Window size must be positive')
    stream = iter(tokens)
    # Prime the first window; the bounded deque then drops the oldest
    # token automatically as each new one arrives.
    buf = deque(islice(stream, window), maxlen=window)
    if len(buf) < window:
        return
    yield list(buf)
    for token in stream:
        buf.append(token)
        yield list(buf)
示例#3
0
    def __init__(self,
                 filename=None,
                 seq_len=256,
                 ls_lm=True,
                 add_bos=False,
                 BOS_ID=None):
        """Builds a subword text encoder, optionally loading a vocab file.

        Args:
            filename: Optional path to a vocabulary file; when given it is
                loaded via ``_load_from_file``.
            seq_len: Stored as an attribute via ``store_attr``; not used
                directly in this constructor.
            ls_lm: Stored as an attribute; not used directly here.
            add_bos: Stored as an attribute; not used directly here.
            BOS_ID: Optional beginning-of-sequence id; defaults to the
                PAD id when omitted.
        """
        # store_attr() (fastai-style) saves every constructor argument onto
        # self by inspecting the calling frame -- parameter names matter.
        store_attr()
        # On Python 2 bytes must be decoded to unicode; on Python 3 strings
        # are already unicode, so the identity function is used.
        self.native_to_unicode = (lambda s: s.decode("utf-8")) if PY2 else (
            lambda s: s)
        # All characters in the unicode letter ("L*") or number ("N*")
        # categories.  NOTE(review): six_range(sys.maxunicode) excludes the
        # final code point sys.maxunicode itself -- presumably harmless,
        # but confirm that was intended.
        self._ALPHANUMERIC_CHAR_SET = set(
            unichr(i) for i in six_range(sys.maxunicode)
            if (unicodedata.category(unichr(i)).startswith("L")
                or unicodedata.category(unichr(i)).startswith("N")))

        # Reserved tokens occupy the first vocabulary ids.
        self.PAD = "<pad>"
        self.EOS = "<EOS>"
        self.RESERVED_TOKENS = [self.PAD, self.EOS]
        self.NUM_RESERVED_TOKENS = len(self.RESERVED_TOKENS)
        self.PAD_ID = self.RESERVED_TOKENS.index(self.PAD)  # Normally 0
        self.EOS_ID = self.RESERVED_TOKENS.index(self.EOS)  # Normally 1
        # Fall back to the padding id when no explicit BOS id is supplied;
        # otherwise store_attr() above already stored the caller's value.
        if BOS_ID is None: self.BOS_ID = self.PAD_ID

        # Regex and charset used by the token (un)escaping helpers elsewhere
        # in this class.
        self._UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
        self._ESCAPE_CHARS = set(u"\\_u;0123456789")

        # Alphabet is populated when a vocabulary is loaded.
        self._alphabet = set()
        self.filename = filename
        if filename is not None:
            self._load_from_file(filename)
        super(SubwordTextEncoder, self).__init__()
示例#4
0
    def _escaped_token_to_subtoken_strings(self, escaped_token):
        """Converts an escaped token string to a list of subtoken strings.

    Args:
      escaped_token: An escaped token as a unicode string.
    Returns:
      A list of subtokens as unicode strings.
    """
        # NOTE: This algorithm is greedy; it won't necessarily produce the "best"
        # list of subtokens.
        ret = []
        start = 0
        token_len = len(escaped_token)
        while start < token_len:
            for end in six_range(
                    min(token_len, start + self._max_subtoken_len), start, -1):
                subtoken = escaped_token[start:end]
                if subtoken in self._subtoken_string_to_id:
                    ret.append(subtoken)
                    start = end
                    break

            else:  # Did not break
                # If there is no possible encoding of the escaped token then one of the
                # characters in the token is not in the alphabet. This should be
                # impossible and would be indicative of a bug.
                assert False, "Token substring not found in subtoken vocabulary."

        return ret
示例#5
0
    def decodes(self, ids, strip_extraneous=False):
        """Converts a sequence of subtoken ids to a native string.

        Args:
          ids: a list of integers in the range [0, vocab_size)
          strip_extraneous: bool, whether to strip off extraneous tokens
            (EOS and PAD).

        Returns:
          a native string
        """
        if strip_extraneous:
            # The reserved ids (PAD/EOS) occupy the low end of the id space.
            reserved = list(six_range(self._num_reserved_ids or 0))
            ids = strip_ids(ids, reserved)
        tokens = self._subtoken_ids_to_tokens(ids)
        return self.unicode_to_native(self._decode(tokens))
示例#6
0
 def _encode(self, text):
     if not text:
         return []
     ret = []
     token_start = 0
     # Classify each character in the input string
     is_alnum = [c in self._ALPHANUMERIC_CHAR_SET for c in text]
     for pos in six_range(1, len(text)):
         if is_alnum[pos] != is_alnum[pos - 1]:
             token = text[token_start:pos]
             if token != u" " or token_start == 0:
                 ret.append(token)
             token_start = pos
     final_token = text[token_start:]
     ret.append(final_token)
     return ret
async def security_rule_present(hub, ctx, name, access, direction, priority, protocol, security_group, resource_group,
                          destination_address_prefix=None, destination_port_range=None, source_address_prefix=None,
                          source_port_range=None, description=None, destination_address_prefixes=None,
                          destination_port_ranges=None, source_address_prefixes=None, source_port_ranges=None,
                          connection_auth=None, **kwargs):
    '''
    .. versionadded:: 1.0.0

    Ensure a security rule exists.

    :param name:
        Name of the security rule.

    :param access:
        'allow' or 'deny'

    :param direction:
        'inbound' or 'outbound'

    :param priority:
        Integer between 100 and 4096 used for ordering rule application.

    :param protocol:
        'tcp', 'udp', or '*'

    :param security_group:
        The name of the existing network security group to contain the security rule.

    :param resource_group:
        The resource group assigned to the network security group.

    :param description:
        Optional description of the security rule.

    :param destination_address_prefix:
        The CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param destination_port_range:
        The destination port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param source_address_prefix:
        The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs.
        Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
        If this is an ingress rule, specifies where network traffic originates from.

    :param source_port_range:
        The source port or range. Integer or range between 0 and 65535. Asterisk '*'
        can also be used to match all ports.

    :param destination_address_prefixes:
        A list of destination_address_prefix values. This parameter overrides destination_address_prefix
        and will cause any value entered there to be ignored.

    :param destination_port_ranges:
        A list of destination_port_range values. This parameter overrides destination_port_range
        and will cause any value entered there to be ignored.

    :param source_address_prefixes:
        A list of source_address_prefix values. This parameter overrides source_address_prefix
        and will cause any value entered there to be ignored.

    :param source_port_ranges:
        A list of source_port_range values. This parameter overrides source_port_range
        and will cause any value entered there to be ignored.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure security rule exists:
            azurerm.network.network_security_group.security_rule_present:
                - name: nsg1_rule2
                - security_group: nsg1
                - resource_group: group1
                - priority: 101
                - protocol: tcp
                - access: allow
                - direction: inbound
                - source_address_prefix: internet
                - destination_address_prefix: virtualnetwork
                - source_port_range: '*'
                - destination_port_ranges:
                  - '80'
                  - '443'
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_network: Ensure network security group exists

    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Validate the mutually-overriding singular/plural parameter pairs using
    # an explicit value dict instead of eval()/exec().  The previous
    # implementation used exec('<name> = None'), which cannot rebind function
    # locals in Python 3, so the singular values were never actually cleared
    # when the plural (list) form was supplied.
    param_vals = {
        'source_port_range': source_port_range,
        'source_port_ranges': source_port_ranges,
        'source_address_prefix': source_address_prefix,
        'source_address_prefixes': source_address_prefixes,
        'destination_port_range': destination_port_range,
        'destination_port_ranges': destination_port_ranges,
        'destination_address_prefix': destination_address_prefix,
        'destination_address_prefixes': destination_address_prefixes,
    }

    exclusive_params = [
        ('source_port_ranges', 'source_port_range'),
        ('source_address_prefixes', 'source_address_prefix'),
        ('destination_port_ranges', 'destination_port_range'),
        ('destination_address_prefixes', 'destination_address_prefix'),
    ]

    for plural, singular in exclusive_params:
        if not param_vals[plural] and not param_vals[singular]:
            ret['comment'] = 'Either the {0} or {1} parameter must be provided!'.format(plural, singular)
            return ret
        if param_vals[plural]:
            if not isinstance(param_vals[plural], list):
                ret['comment'] = 'The {0} parameter must be a list!'.format(plural)
                return ret
            # The plural (list) parameter overrides its singular counterpart.
            param_vals[singular] = None

    # Rebind the possibly-cleared singular parameters.
    source_port_range = param_vals['source_port_range']
    source_address_prefix = param_vals['source_address_prefix']
    destination_port_range = param_vals['destination_port_range']
    destination_address_prefix = param_vals['destination_address_prefix']

    rule = await hub.exec.azurerm.network.network_security_group.security_rule_get(
        name,
        security_group,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    if 'error' not in rule:
        # The rule already exists: diff every attribute against the desired
        # state, recording old/new pairs for any that differ.

        # access changes
        if access.capitalize() != rule.get('access'):
            ret['changes']['access'] = {
                'old': rule.get('access'),
                'new': access
            }

        # description changes
        if description != rule.get('description'):
            ret['changes']['description'] = {
                'old': rule.get('description'),
                'new': description
            }

        # direction changes
        if direction.capitalize() != rule.get('direction'):
            ret['changes']['direction'] = {
                'old': rule.get('direction'),
                'new': direction
            }

        # priority changes
        if int(priority) != rule.get('priority'):
            ret['changes']['priority'] = {
                'old': rule.get('priority'),
                'new': priority
            }

        # protocol changes (guard against a None value stored in the rule)
        if protocol.lower() != (rule.get('protocol') or '').lower():
            ret['changes']['protocol'] = {
                'old': rule.get('protocol'),
                'new': protocol
            }

        # destination_port_range changes
        if destination_port_range != rule.get('destination_port_range'):
            ret['changes']['destination_port_range'] = {
                'old': rule.get('destination_port_range'),
                'new': destination_port_range
            }

        # source_port_range changes
        if source_port_range != rule.get('source_port_range'):
            ret['changes']['source_port_range'] = {
                'old': rule.get('source_port_range'),
                'new': source_port_range
            }

        # destination_port_ranges changes
        if sorted(destination_port_ranges or []) != sorted(rule.get('destination_port_ranges') or []):
            ret['changes']['destination_port_ranges'] = {
                'old': rule.get('destination_port_ranges'),
                'new': destination_port_ranges
            }

        # source_port_ranges changes
        if sorted(source_port_ranges or []) != sorted(rule.get('source_port_ranges') or []):
            ret['changes']['source_port_ranges'] = {
                'old': rule.get('source_port_ranges'),
                'new': source_port_ranges
            }

        # destination_address_prefix changes (case-insensitive, None-safe)
        if (destination_address_prefix or '').lower() != (rule.get('destination_address_prefix') or '').lower():
            ret['changes']['destination_address_prefix'] = {
                'old': rule.get('destination_address_prefix'),
                'new': destination_address_prefix
            }

        # source_address_prefix changes (case-insensitive, None-safe)
        if (source_address_prefix or '').lower() != (rule.get('source_address_prefix') or '').lower():
            ret['changes']['source_address_prefix'] = {
                'old': rule.get('source_address_prefix'),
                'new': source_address_prefix
            }

        # destination_address_prefixes changes: compare lengths first, then
        # element-wise case-insensitively on the sorted lists.
        if sorted(destination_address_prefixes or []) != sorted(rule.get('destination_address_prefixes') or []):
            if len(destination_address_prefixes or []) != len(rule.get('destination_address_prefixes') or []):
                ret['changes']['destination_address_prefixes'] = {
                    'old': rule.get('destination_address_prefixes'),
                    'new': destination_address_prefixes
                }
            else:
                local_dst_addrs, remote_dst_addrs = (sorted(destination_address_prefixes),
                                                     sorted(rule.get('destination_address_prefixes') or []))
                for idx in six_range(0, len(local_dst_addrs)):
                    if local_dst_addrs[idx].lower() != remote_dst_addrs[idx].lower():
                        ret['changes']['destination_address_prefixes'] = {
                            'old': rule.get('destination_address_prefixes'),
                            'new': destination_address_prefixes
                        }
                        break

        # source_address_prefixes changes: same strategy as destination.
        if sorted(source_address_prefixes or []) != sorted(rule.get('source_address_prefixes') or []):
            if len(source_address_prefixes or []) != len(rule.get('source_address_prefixes') or []):
                ret['changes']['source_address_prefixes'] = {
                    'old': rule.get('source_address_prefixes'),
                    'new': source_address_prefixes
                }
            else:
                local_src_addrs, remote_src_addrs = (sorted(source_address_prefixes),
                                                     sorted(rule.get('source_address_prefixes') or []))
                for idx in six_range(0, len(local_src_addrs)):
                    if local_src_addrs[idx].lower() != remote_src_addrs[idx].lower():
                        ret['changes']['source_address_prefixes'] = {
                            'old': rule.get('source_address_prefixes'),
                            'new': source_address_prefixes
                        }
                        break

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Security rule {0} is already present.'.format(name)
            return ret

        if ctx['test']:
            ret['result'] = None
            ret['comment'] = 'Security rule {0} would be updated.'.format(name)
            return ret

    else:
        # The rule does not exist yet; everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'access': access,
                'description': description,
                'direction': direction,
                'priority': priority,
                'protocol': protocol,
                'destination_address_prefix': destination_address_prefix,
                'destination_address_prefixes': destination_address_prefixes,
                'destination_port_range': destination_port_range,
                'destination_port_ranges': destination_port_ranges,
                'source_address_prefix': source_address_prefix,
                'source_address_prefixes': source_address_prefixes,
                'source_port_range': source_port_range,
                'source_port_ranges': source_port_ranges,
            }
        }

    if ctx['test']:
        ret['comment'] = 'Security rule {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    rule_kwargs = kwargs.copy()
    rule_kwargs.update(connection_auth)

    rule = await hub.exec.azurerm.network.network_security_group.security_rule_create_or_update(
        name=name,
        access=access,
        description=description,
        direction=direction,
        priority=priority,
        protocol=protocol,
        security_group=security_group,
        resource_group=resource_group,
        destination_address_prefix=destination_address_prefix,
        destination_address_prefixes=destination_address_prefixes,
        destination_port_range=destination_port_range,
        destination_port_ranges=destination_port_ranges,
        source_address_prefix=source_address_prefix,
        source_address_prefixes=source_address_prefixes,
        source_port_range=source_port_range,
        source_port_ranges=source_port_ranges,
        **rule_kwargs
    )

    if 'error' not in rule:
        ret['result'] = True
        ret['comment'] = 'Security rule {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create security rule {0}! ({1})'.format(name, rule.get('error'))
    return ret
示例#8
0
async def create_or_update(hub, name, resource_group, **kwargs):
    '''
    .. versionadded:: 1.0.0

    Create or update a load balancer within a specified resource group.

    :param name: The name of the load balancer to create.

    :param resource_group: The resource group name assigned to the
        load balancer.

    CLI Example:

    .. code-block:: bash

        azurerm.network.load_balancer.create_or_update testlb testgroup

    '''
    # Default the location to that of the resource group when not supplied.
    if 'location' not in kwargs:
        rg_props = await hub.exec.azurerm.resource.group.get(
            resource_group, **kwargs)

        if 'error' in rg_props:
            log.error(
                'Unable to determine location from resource group specified.')
            # NOTE(review): this returns False while every other error path
            # returns a dict with an 'error' key -- confirm callers handle
            # both shapes.
            return False
        kwargs['location'] = rg_props['location']

    netconn = await hub.exec.utils.azurerm.get_client('network', **kwargs)

    # Resolve frontend IP configuration references given by name into full
    # Azure resource IDs before building the object model.
    if isinstance(kwargs.get('frontend_ip_configurations'), list):
        for idx in six_range(0, len(kwargs['frontend_ip_configurations'])):
            # Use Public IP Address name to link to the ID of an existing Public IP
            if 'public_ip_address' in kwargs['frontend_ip_configurations'][
                    idx]:
                # NOTE(review): called without await -- presumably a
                # synchronous helper; verify against its definition.
                pub_ip = public_ip_address_get(
                    name=kwargs['frontend_ip_configurations'][idx]
                    ['public_ip_address'],
                    resource_group=resource_group,
                    **kwargs)
                if 'error' not in pub_ip:
                    kwargs['frontend_ip_configurations'][idx][
                        'public_ip_address'] = {
                            'id': str(pub_ip['id'])
                        }
            # Use Subnet name to link to the ID of an existing Subnet
            elif 'subnet' in kwargs['frontend_ip_configurations'][idx]:
                vnets = virtual_networks_list(resource_group=resource_group,
                                              **kwargs)
                if 'error' not in vnets:
                    # Search every virtual network for the named subnet.
                    for vnet in vnets:
                        subnets = subnets_list(virtual_network=vnet,
                                               resource_group=resource_group,
                                               **kwargs)
                        if kwargs['frontend_ip_configurations'][idx][
                                'subnet'] in subnets:
                            kwargs['frontend_ip_configurations'][idx][
                                'subnet'] = {
                                    'id':
                                    str(subnets[
                                        kwargs['frontend_ip_configurations']
                                        [idx]['subnet']]['id'])
                                }
                            break

    # ID template: subscription / resource group / load balancer /
    # sub-resource collection / sub-resource name.
    id_url = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/{3}/{4}'

    if isinstance(kwargs.get('load_balancing_rules'), list):
        for idx in six_range(0, len(kwargs['load_balancing_rules'])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if 'frontend_ip_configuration' in kwargs['load_balancing_rules'][
                    idx]:
                kwargs['load_balancing_rules'][idx][
                    'frontend_ip_configuration'] = {
                        'id':
                        id_url.format(
                            kwargs.get('subscription_id'), resource_group,
                            name, 'frontendIPConfigurations',
                            kwargs['load_balancing_rules'][idx]
                            ['frontend_ip_configuration'])
                    }
            if 'backend_address_pool' in kwargs['load_balancing_rules'][idx]:
                kwargs['load_balancing_rules'][idx]['backend_address_pool'] = {
                    'id':
                    id_url.format(
                        kwargs.get('subscription_id'), resource_group, name,
                        'backendAddressPools', kwargs['load_balancing_rules']
                        [idx]['backend_address_pool'])
                }
            if 'probe' in kwargs['load_balancing_rules'][idx]:
                kwargs['load_balancing_rules'][idx]['probe'] = {
                    'id':
                    id_url.format(kwargs.get('subscription_id'),
                                  resource_group, name, 'probes',
                                  kwargs['load_balancing_rules'][idx]['probe'])
                }

    if isinstance(kwargs.get('inbound_nat_rules'), list):
        for idx in six_range(0, len(kwargs['inbound_nat_rules'])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if 'frontend_ip_configuration' in kwargs['inbound_nat_rules'][idx]:
                kwargs['inbound_nat_rules'][idx][
                    'frontend_ip_configuration'] = {
                        'id':
                        id_url.format(
                            kwargs.get('subscription_id'), resource_group,
                            name, 'frontendIPConfigurations',
                            kwargs['inbound_nat_rules'][idx]
                            ['frontend_ip_configuration'])
                    }

    if isinstance(kwargs.get('inbound_nat_pools'), list):
        for idx in six_range(0, len(kwargs['inbound_nat_pools'])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if 'frontend_ip_configuration' in kwargs['inbound_nat_pools'][idx]:
                kwargs['inbound_nat_pools'][idx][
                    'frontend_ip_configuration'] = {
                        'id':
                        id_url.format(
                            kwargs.get('subscription_id'), resource_group,
                            name, 'frontendIPConfigurations',
                            kwargs['inbound_nat_pools'][idx]
                            ['frontend_ip_configuration'])
                    }

    if isinstance(kwargs.get('outbound_nat_rules'), list):
        for idx in six_range(0, len(kwargs['outbound_nat_rules'])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if 'frontend_ip_configuration' in kwargs['outbound_nat_rules'][
                    idx]:
                kwargs['outbound_nat_rules'][idx][
                    'frontend_ip_configuration'] = {
                        'id':
                        id_url.format(
                            kwargs.get('subscription_id'), resource_group,
                            name, 'frontendIPConfigurations',
                            kwargs['outbound_nat_rules'][idx]
                            ['frontend_ip_configuration'])
                    }
            if 'backend_address_pool' in kwargs['outbound_nat_rules'][idx]:
                kwargs['outbound_nat_rules'][idx]['backend_address_pool'] = {
                    'id':
                    id_url.format(
                        kwargs.get('subscription_id'), resource_group, name,
                        'backendAddressPools', kwargs['outbound_nat_rules']
                        [idx]['backend_address_pool'])
                }

    try:
        # Build the LoadBalancer model; TypeError signals invalid kwargs.
        lbmodel = await hub.exec.utils.azurerm.create_object_model(
            'network', 'LoadBalancer', **kwargs)
    except TypeError as exc:
        result = {
            'error':
            'The object model could not be built. ({0})'.format(str(exc))
        }
        return result

    try:
        load_balancer = netconn.load_balancers.create_or_update(
            resource_group_name=resource_group,
            load_balancer_name=name,
            parameters=lbmodel)
        # Block until the long-running Azure operation completes, then
        # serialize the resulting model to a plain dict.
        load_balancer.wait()
        lb_result = load_balancer.result()
        result = lb_result.as_dict()
    except CloudError as exc:
        await hub.exec.utils.azurerm.log_cloud_error('network', str(exc),
                                                     **kwargs)
        result = {'error': str(exc)}
    except SerializationError as exc:
        result = {
            'error':
            'The object model could not be parsed. ({0})'.format(str(exc))
        }

    return result
示例#9
0
async def create_or_update(hub, ctx, name, resource_group, **kwargs):
    """
    .. versionadded:: 1.0.0

    Create or update a load balancer within a specified resource group.

    :param name: The name of the load balancer to create.

    :param resource_group: The resource group name assigned to the load balancer.

    CLI Example:

    .. code-block:: bash

        azurerm.network.load_balancer.create_or_update test_name test_group

    """
    # Default the location to that of the resource group when not supplied.
    if "location" not in kwargs:
        rg_props = await hub.exec.azurerm.resource.group.get(
            ctx, resource_group, **kwargs)

        if "error" in rg_props:
            log.error(
                "Unable to determine location from resource group specified.")
            return {
                "error":
                "Unable to determine location from resource group specified."
            }
        kwargs["location"] = rg_props["location"]

    result = {}
    netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)

    # Resolve frontend IP configuration references given by name into full
    # Azure resource IDs before building the object model.
    if isinstance(kwargs.get("frontend_ip_configurations"), list):
        for idx in six_range(0, len(kwargs["frontend_ip_configurations"])):
            # Use Public IP Address name to link to the ID of an existing Public IP
            if "public_ip_address" in kwargs["frontend_ip_configurations"][
                    idx]:
                pub_ip = await hub.exec.azurerm.network.public_ip_address.get(
                    ctx=ctx,
                    name=kwargs["frontend_ip_configurations"][idx]
                    ["public_ip_address"],
                    resource_group=resource_group,
                    **kwargs,
                )
                if "error" not in pub_ip:
                    kwargs["frontend_ip_configurations"][idx][
                        "public_ip_address"] = {
                            "id": str(pub_ip["id"])
                        }
            # Use Subnet name to link to the ID of an existing Subnet
            elif "subnet" in kwargs["frontend_ip_configurations"][idx]:
                vnets = await hub.exec.azurerm.network.virtual_network.list(
                    ctx=ctx, resource_group=resource_group, **kwargs)
                if "error" not in vnets:
                    # Search every virtual network for the named subnet.
                    for vnet in vnets:
                        subnets = await hub.exec.azurerm.network.virtual_network.subnets_list(
                            ctx=ctx,
                            virtual_network=vnet,
                            resource_group=resource_group,
                            **kwargs,
                        )
                        if (kwargs["frontend_ip_configurations"][idx]["subnet"]
                                in subnets):
                            kwargs["frontend_ip_configurations"][idx][
                                "subnet"] = {
                                    "id":
                                    str(subnets[
                                        kwargs["frontend_ip_configurations"]
                                        [idx]["subnet"]]["id"])
                                }
                            break

    # ID template: subscription / resource group / load balancer /
    # sub-resource collection / sub-resource name.
    id_url = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/{3}/{4}"

    if isinstance(kwargs.get("load_balancing_rules"), list):
        for idx in six_range(0, len(kwargs["load_balancing_rules"])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if "frontend_ip_configuration" in kwargs["load_balancing_rules"][
                    idx]:
                kwargs["load_balancing_rules"][idx][
                    "frontend_ip_configuration"] = {
                        "id":
                        id_url.format(
                            kwargs.get("subscription_id"),
                            resource_group,
                            name,
                            "frontendIPConfigurations",
                            kwargs["load_balancing_rules"][idx]
                            ["frontend_ip_configuration"],
                        )
                    }
            if "backend_address_pool" in kwargs["load_balancing_rules"][idx]:
                kwargs["load_balancing_rules"][idx]["backend_address_pool"] = {
                    "id":
                    id_url.format(
                        kwargs.get("subscription_id"),
                        resource_group,
                        name,
                        "backendAddressPools",
                        kwargs["load_balancing_rules"][idx]
                        ["backend_address_pool"],
                    )
                }
            if "probe" in kwargs["load_balancing_rules"][idx]:
                kwargs["load_balancing_rules"][idx]["probe"] = {
                    "id":
                    id_url.format(
                        kwargs.get("subscription_id"),
                        resource_group,
                        name,
                        "probes",
                        kwargs["load_balancing_rules"][idx]["probe"],
                    )
                }

    if isinstance(kwargs.get("inbound_nat_rules"), list):
        for idx in six_range(0, len(kwargs["inbound_nat_rules"])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if "frontend_ip_configuration" in kwargs["inbound_nat_rules"][idx]:
                kwargs["inbound_nat_rules"][idx][
                    "frontend_ip_configuration"] = {
                        "id":
                        id_url.format(
                            kwargs.get("subscription_id"),
                            resource_group,
                            name,
                            "frontendIPConfigurations",
                            kwargs["inbound_nat_rules"][idx]
                            ["frontend_ip_configuration"],
                        )
                    }

    if isinstance(kwargs.get("inbound_nat_pools"), list):
        for idx in six_range(0, len(kwargs["inbound_nat_pools"])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if "frontend_ip_configuration" in kwargs["inbound_nat_pools"][idx]:
                kwargs["inbound_nat_pools"][idx][
                    "frontend_ip_configuration"] = {
                        "id":
                        id_url.format(
                            kwargs.get("subscription_id"),
                            resource_group,
                            name,
                            "frontendIPConfigurations",
                            kwargs["inbound_nat_pools"][idx]
                            ["frontend_ip_configuration"],
                        )
                    }

    if isinstance(kwargs.get("outbound_rules"), list):
        for idx in six_range(0, len(kwargs["outbound_rules"])):
            # Link to sub-objects which might be created at the same time as the load balancer
            if "frontend_ip_configuration" in kwargs["outbound_rules"][idx]:
                kwargs["outbound_rules"][idx]["frontend_ip_configuration"] = {
                    "id":
                    id_url.format(
                        kwargs.get("subscription_id"),
                        resource_group,
                        name,
                        "frontendIPConfigurations",
                        kwargs["outbound_rules"][idx]
                        ["frontend_ip_configuration"],
                    )
                }
            if "backend_address_pool" in kwargs["outbound_rules"][idx]:
                kwargs["outbound_rules"][idx]["backend_address_pool"] = {
                    "id":
                    id_url.format(
                        kwargs.get("subscription_id"),
                        resource_group,
                        name,
                        "backendAddressPools",
                        kwargs["outbound_rules"][idx]["backend_address_pool"],
                    )
                }

    try:
        # Build the LoadBalancer model; TypeError signals invalid kwargs.
        lbmodel = await hub.exec.azurerm.utils.create_object_model(
            "network", "LoadBalancer", **kwargs)
    except TypeError as exc:
        result = {
            "error":
            "The object model could not be built. ({0})".format(str(exc))
        }
        return result

    try:
        load_balancer = netconn.load_balancers.create_or_update(
            resource_group_name=resource_group,
            load_balancer_name=name,
            parameters=lbmodel,
        )

        # Block until the long-running Azure operation completes, then
        # serialize the resulting model to a plain dict.
        load_balancer.wait()
        result = load_balancer.result().as_dict()
    except CloudError as exc:
        await hub.exec.azurerm.utils.log_cloud_error("network", str(exc),
                                                     **kwargs)
        result = {"error": str(exc)}
    except SerializationError as exc:
        result = {
            "error":
            "The object model could not be parsed. ({0})".format(str(exc))
        }

    return result
示例#10
0
async def present(hub,
                  ctx,
                  name,
                  zone_name,
                  resource_group,
                  record_type,
                  if_match=None,
                  if_none_match=None,
                  etag=None,
                  metadata=None,
                  ttl=None,
                  arecords=None,
                  aaaa_records=None,
                  mx_records=None,
                  ns_records=None,
                  ptr_records=None,
                  srv_records=None,
                  txt_records=None,
                  cname_record=None,
                  soa_record=None,
                  caa_records=None,
                  connection_auth=None,
                  **kwargs):
    '''
    .. versionadded:: 1.0.0

    Ensure a record set exists in a DNS zone.

    :param name:
        The name of the record set, relative to the name of the zone.

    :param zone_name:
        Name of the DNS zone (without a terminating dot).

    :param resource_group:
        The resource group assigned to the DNS zone.

    :param record_type:
        The type of DNS record in this record set. Record sets of type SOA can be updated but not created
        (they are created when the DNS zone is created). Possible values include: 'A', 'AAAA', 'CAA', 'CNAME',
        'MX', 'NS', 'PTR', 'SOA', 'SRV', 'TXT'

    :param if_match:
        The etag of the record set. Omit this value to always overwrite the current record set. Specify the last-seen
        etag value to prevent accidentally overwritting any concurrent changes.

    :param if_none_match:
        Set to '*' to allow a new record set to be created, but to prevent updating an existing record set. Other values
        will be ignored.

    :param etag:
        The etag of the record set. `Etags <https://docs.microsoft.com/en-us/azure/dns/dns-zones-records#etags>`_ are
        used to handle concurrent changes to the same resource safely.

    :param metadata:
        A dictionary of strings can be passed as tag metadata to the record set object.

    :param ttl:
        The TTL (time-to-live) of the records in the record set. Required when specifying record information.

    :param arecords:
        The list of A records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.arecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param aaaa_records:
        The list of AAAA records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.aaaarecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param mx_records:
        The list of MX records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.mxrecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param ns_records:
        The list of NS records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.nsrecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param ptr_records:
        The list of PTR records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.ptrrecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param srv_records:
        The list of SRV records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.srvrecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param txt_records:
        The list of TXT records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.txtrecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param cname_record:
        The CNAME record in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.cnamerecord?view=azure-python>`_
        to create a dictionary representing the record object.

    :param soa_record:
        The SOA record in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.soarecord?view=azure-python>`_
        to create a dictionary representing the record object.

    :param caa_records:
        The list of CAA records in the record set. View the
        `Azure SDK documentation <https://docs.microsoft.com/en-us/python/api/azure.mgmt.dns.models.caarecord?view=azure-python>`_
        to create a list of dictionaries representing the record objects.

    :param connection_auth:
        A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure record set exists:
            azurerm.dns.record_set.present:
                - name: web
                - zone_name: contoso.com
                - resource_group: my_rg
                - record_type: A
                - ttl: 300
                - arecords:
                  - ipv4_address: 10.0.0.1
                - metadata:
                    how_awesome: very
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}

    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}

    # Explicit name -> value mapping of the record parameters. This replaces
    # the previous eval()-based lookup of local variables by name, which was
    # fragile and required pylint suppressions. Insertion order matches the
    # original list so iteration order is unchanged.
    record_vars = {
        'arecords': arecords,
        'aaaa_records': aaaa_records,
        'mx_records': mx_records,
        'ns_records': ns_records,
        'ptr_records': ptr_records,
        'srv_records': srv_records,
        'txt_records': txt_records,
        'cname_record': cname_record,
        'soa_record': soa_record,
        'caa_records': caa_records,
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    rec_set = await hub.exec.azurerm.dns.record_set.get(
        name,
        zone_name,
        resource_group,
        record_type,
        azurearm_log_level='info',
        **connection_auth)

    if 'error' not in rec_set:
        # The record set already exists; diff the desired state against it.
        metadata_changes = await hub.exec.utils.dictdiffer.deep_diff(
            rec_set.get('metadata', {}), metadata or {})
        if metadata_changes:
            ret['changes']['metadata'] = metadata_changes

        for record_str, record in record_vars.items():
            if record:
                if not ttl:
                    ret['comment'] = 'TTL is required when specifying record information!'
                    return ret
                if not rec_set.get(record_str):
                    ret['changes'] = {'new': {record_str: record}}
                    continue
                # Parameter names not ending in 's' (cname_record, soa_record)
                # hold a single record dict; the rest hold lists of dicts.
                if record_str[-1] != 's':
                    if not isinstance(record, dict):
                        ret['comment'] = '{0} record information must be specified as a dictionary!'.format(
                            record_str)
                        return ret
                    for k, v in record.items():
                        if v != rec_set[record_str].get(k):
                            ret['changes'] = {'new': {record_str: record}}
                elif record_str[-1] == 's':
                    if not isinstance(record, list):
                        ret['comment'] = '{0} record information must be specified as a list of dictionaries!'.format(
                            record_str)
                        return ret
                    # Sort both sides so positional comparison is order-insensitive.
                    local, remote = [
                        sorted(config)
                        for config in (record, rec_set[record_str])
                    ]
                    for idx in six_range(0, len(local)):
                        for key in local[idx]:
                            local_val = local[idx][key]
                            remote_val = remote[idx].get(key)
                            # Compare strings case-insensitively (Azure may
                            # normalize casing on returned values).
                            if isinstance(local_val, six.string_types):
                                local_val = local_val.lower()
                            if isinstance(remote_val, six.string_types):
                                remote_val = remote_val.lower()
                            if local_val != remote_val:
                                ret['changes'] = {'new': {record_str: record}}

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Record set {0} is already present.'.format(name)
            return ret

        if ctx['test']:
            ret['result'] = None
            ret['comment'] = 'Record set {0} would be updated.'.format(name)
            return ret

    else:
        # The record set does not exist yet; everything provided is "new".
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'zone_name': zone_name,
                'resource_group': resource_group,
                'record_type': record_type,
                'etag': etag,
                'metadata': metadata,
                'ttl': ttl,
            }
        }
        for record_str, record in record_vars.items():
            if record:
                ret['changes']['new'][record_str] = record

    if ctx['test']:
        ret['comment'] = 'Record set {0} would be created.'.format(name)
        ret['result'] = None
        return ret

    rec_set_kwargs = kwargs.copy()
    rec_set_kwargs.update(connection_auth)

    rec_set = await hub.exec.azurerm.dns.record_set.create_or_update(
        name=name,
        zone_name=zone_name,
        resource_group=resource_group,
        record_type=record_type,
        if_match=if_match,
        if_none_match=if_none_match,
        etag=etag,
        ttl=ttl,
        metadata=metadata,
        arecords=arecords,
        aaaa_records=aaaa_records,
        mx_records=mx_records,
        ns_records=ns_records,
        ptr_records=ptr_records,
        srv_records=srv_records,
        txt_records=txt_records,
        cname_record=cname_record,
        soa_record=soa_record,
        caa_records=caa_records,
        **rec_set_kwargs)

    if 'error' not in rec_set:
        ret['result'] = True
        ret['comment'] = 'Record set {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create record set {0}! ({1})'.format(
        name, rec_set.get('error'))
    return ret
示例#11
0
    def build_from_token_counts(self,
                                token_counts,
                                min_count,
                                num_iterations=4,
                                reserved_tokens=None,
                                max_subtoken_length=None):
        """Train a SubwordTextEncoder based on a dictionary of word counts.

    Args:
      token_counts: a dictionary of Unicode strings to int.
      min_count: an integer - discard subtokens with lower counts.
      num_iterations: an integer.  how many iterations of refinement.
      reserved_tokens: List of reserved tokens. The global variable
        `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
        argument is `None`, it will use `RESERVED_TOKENS`.
      max_subtoken_length: Maximum length of a subtoken. If this is not set,
        then the runtime and memory use of creating the vocab is quadratic in
        the length of the longest token. If this is set, then it is instead
        O(max_subtoken_length * length of longest token).

    Raises:
      ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
        is not clear what the space is being reserved for, or when it will be
        filled in.
    """
        if reserved_tokens is None:
            reserved_tokens = RESERVED_TOKENS
        else:
            # There is not complete freedom in replacing RESERVED_TOKENS.
            # Only appending new reserved tokens is allowed; existing ones
            # must stay in place so their ids stay stable.
            for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
                if default != proposed:
                    raise ValueError("RESERVED_TOKENS must be a prefix of "
                                     "reserved_tokens.")

        # Initialize the alphabet. Note, this must include reserved tokens or it can
        # result in encoding failures.
        alphabet_tokens = chain(
            iterkeys(token_counts),
            [self.native_to_unicode(t) for t in reserved_tokens])

        self._init_alphabet_from_tokens(alphabet_tokens)

        # Bootstrap the initial list of subtokens with the characters from the
        # alphabet plus the escaping characters.
        self._init_subtokens_from_list(list(self._alphabet),
                                       reserved_tokens=reserved_tokens)

        # We build iteratively.  On each iteration, we segment all the words,
        # then count the resulting potential subtokens, keeping the ones
        # with high enough counts for our new vocabulary.
        if min_count < 1:
            min_count = 1
        for i in six_range(num_iterations):
            #tf.logging.info("Iteration {0}".format(i))

            # Collect all substrings of the encoded token that break along current
            # subtoken boundaries.
            subtoken_counts = collections.defaultdict(int)
            for token, count in iteritems(token_counts):
                # Timed per token so pathologically long tokens can be
                # reported to the user below.
                iter_start_time = time.time()
                escaped_token = self._escape_token(token, self._alphabet)
                subtokens = self._escaped_token_to_subtoken_strings(
                    escaped_token)
                start = 0
                for subtoken in subtokens:
                    # Candidate end positions run to the end of the escaped
                    # token (exclusive upper bound), optionally capped by
                    # max_subtoken_length to keep this loop linear.
                    last_position = len(escaped_token) + 1
                    if max_subtoken_length is not None:
                        last_position = min(last_position,
                                            start + max_subtoken_length)

                    # Every substring starting at a current subtoken boundary
                    # is a candidate for the next vocabulary.
                    for end in six_range(start + 1, last_position):
                        new_subtoken = escaped_token[start:end]
                        subtoken_counts[new_subtoken] += count
                    # Advance to the next current-segmentation boundary.
                    start += len(subtoken)
                iter_time_secs = time.time() - iter_start_time
                if iter_time_secs > 0.1:
                    print(u"Processing token [{0}] took {1} seconds, consider "
                          "setting Text2TextProblem.max_subtoken_length to a "
                          "smaller value.".format(token, iter_time_secs))

            # Array of sets of candidate subtoken strings, by length.
            len_to_subtoken_strings = []
            for subtoken_string, count in iteritems(subtoken_counts):
                lsub = len(subtoken_string)
                if count >= min_count:
                    # Grow the by-length index lazily to accommodate lsub.
                    while len(len_to_subtoken_strings) <= lsub:
                        len_to_subtoken_strings.append(set())
                    len_to_subtoken_strings[lsub].add(subtoken_string)

            # Consider the candidates longest to shortest, so that if we accept
            # a longer subtoken string, we can decrement the counts of its prefixes.
            new_subtoken_strings = []
            for lsub in six_range(len(len_to_subtoken_strings) - 1, 0, -1):
                subtoken_strings = len_to_subtoken_strings[lsub]
                for subtoken_string in subtoken_strings:
                    count = subtoken_counts[subtoken_string]
                    # Re-check the (possibly decremented) count before accepting.
                    if count >= min_count:
                        # Exclude alphabet tokens here, as they must be included later,
                        # explicitly, regardless of count.
                        if subtoken_string not in self._alphabet:
                            new_subtoken_strings.append(
                                (count, subtoken_string))
                        for l in six_range(1, lsub):
                            subtoken_counts[subtoken_string[:l]] -= count

            # Include the alphabet explicitly to guarantee all strings are encodable.
            new_subtoken_strings.extend(
                (subtoken_counts.get(a, 0), a) for a in self._alphabet)
            # Highest counts first; the id order follows this ranking.
            new_subtoken_strings.sort(reverse=True)

            # Reinitialize to the candidate vocabulary.
            new_subtoken_strings = [
                subtoken for _, subtoken in new_subtoken_strings
            ]
            if reserved_tokens:
                # Reserved tokens keep the lowest ids, ahead of all learned
                # subtokens, on every iteration.
                escaped_reserved_tokens = [
                    self._escape_token(self.native_to_unicode(t),
                                       self._alphabet) for t in reserved_tokens
                ]
                new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings

            self._init_subtokens_from_list(new_subtoken_strings)
示例#12
0
 def generate_ids(self, key=None, entity_type=None, count=1):
     """Build a base id via ``_generate_id`` plus ``count`` suffixed entity
     ids, record them under ``entity_type``, and return both."""
     base_entity_id = self._generate_id(key=key)
     suffixed_ids = []
     for index in range(count):
         suffixed_ids.append('{0}-{1}'.format(base_entity_id, index))
     self._entity_ids[entity_type].extend(suffixed_ids)
     return base_entity_id, suffixed_ids