def configure_second_admin_cobbler(self):
    dhcp_template = '/etc/cobbler/dnsmasq.template'
    remote = self.get_admin_remote()
    main_admin_ip = str(self.nodes().admin.
                        get_ip_address_by_network_name(self.admin_net))
    second_admin_ip = str(self.nodes().admin.
                          get_ip_address_by_network_name(self.admin_net2))
    second_admin_network = self.get_network(self.admin_net2).split('/')[0]
    second_admin_netmask = self.get_net_mask(self.admin_net2)
    network = IPNetwork('{0}/{1}'.format(second_admin_network,
                                         second_admin_netmask))
    discovery_subnet = [net for net in network.iter_subnets(1)][-1]
    first_discovery_address = str(discovery_subnet.network)
    last_discovery_address = str(discovery_subnet.broadcast - 1)
    new_range = ('dhcp-range=internal2,{0},{1},{2}\\n'
                 'dhcp-option=net:internal2,option:router,{3}\\n'
                 'dhcp-boot=net:internal2,pxelinux.0,boothost,{4}\\n').\
        format(first_discovery_address, last_discovery_address,
               second_admin_netmask, second_admin_ip, main_admin_ip)
    cmd = ("dockerctl shell cobbler sed -r '$a \{0}' -i {1};"
           "dockerctl shell cobbler cobbler sync").format(new_range,
                                                          dhcp_template)
    result = remote.execute(cmd)
    assert_equal(result['exit_code'], 0, ('Failed to add second admin '
                 'network to cobbler: {0}').format(result))
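A minimal standalone sketch of the discovery-range arithmetic used above, assuming the ipaddr library's IPNetwork (the network value is made up): iter_subnets(1) halves the admin network, and the upper half becomes the dnsmasq discovery range.

from ipaddr import IPNetwork

network = IPNetwork('10.109.10.0/24')            # assumed admin network
upper_half = list(network.iter_subnets(1))[-1]   # 10.109.10.128/25
first = str(upper_half.network)                  # '10.109.10.128'
last = str(upper_half.broadcast - 1)             # '10.109.10.254'
print('dhcp-range=internal2,%s,%s,%s' % (first, last, network.netmask))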
Example #2
def configure_second_admin_cobbler(self):
    dhcp_template = '/etc/cobbler/dnsmasq.template'
    remote = self.d_env.get_admin_remote()
    admin_net2 = self.d_env.admin_net2
    second_admin_if = settings.INTERFACES.get(admin_net2)
    second_admin_ip = str(
        self.d_env.nodes().admin.get_ip_address_by_network_name(admin_net2))

    admin_net2_object = self.d_env.get_network(name=admin_net2)
    second_admin_network = admin_net2_object.ip.ip
    second_admin_netmask = admin_net2_object.ip.netmask
    network = IPNetwork('{0}/{1}'.format(second_admin_network,
                                         second_admin_netmask))
    discovery_subnet = [net for net in network.iter_subnets(1)][-1]
    first_discovery_address = str(discovery_subnet.network)
    last_discovery_address = str(discovery_subnet.broadcast - 1)
    new_range = ('interface={4}\\n'
                 'dhcp-range=internal2,{0},{1},{2}\\n'
                 'dhcp-option=net:internal2,option:router,{3}\\n'
                 'pxe-service=net:internal2,x86PC,"Install",pxelinux,{3}\\n'
                 'dhcp-boot=net:internal2,pxelinux.0,boothost,{3}\\n').\
        format(first_discovery_address, last_discovery_address,
               second_admin_netmask, second_admin_ip, second_admin_if)
    cmd = ("dockerctl shell cobbler sed -r '$a \{0}' -i {1};"
           "dockerctl shell cobbler cobbler sync").format(new_range,
                                                          dhcp_template)
    result = remote.execute(cmd)
    assert_equal(result['exit_code'], 0, ('Failed to add second admin '
                 'network to cobbler: {0}').format(result))
Example #3
    def clean(self):
        cdata = self.cleaned_data

        ipv4key = 'int_ipv4address'
        ipv4addr = cdata.get(ipv4key)
        ipv4addr_b = cdata.get('int_ipv4address_b')
        ipv4net = cdata.get("int_v4netmaskbit")

        if ipv4addr and ipv4addr_b and ipv4net:
            network = IPNetwork('%s/%s' % (ipv4addr, ipv4net))
            if not network.overlaps(
                IPNetwork('%s/%s' % (ipv4addr_b, ipv4net))
            ):
                self._errors['int_ipv4address_b'] = self.error_class([
                    _('The IP must be within the same network')
                ])

        ipv6addr = cdata.get("int_ipv6address")
        ipv6net = cdata.get("int_v6netmaskbit")
        ipv4 = True if ipv4addr and ipv4net else False
        ipv6 = True if ipv6addr and ipv6net else False

        # If one field of IPv4 is entered, require the other
        if (ipv4addr or ipv4net) and not ipv4:
            if not (ipv4addr or ipv4addr_b) and not self._errors.get(ipv4key):
                self._errors[ipv4key] = self.error_class([
                    _("You have to specify IPv4 address as well"),
                ])
            if not ipv4net and 'int_v4netmaskbit' not in self._errors:
                self._errors['int_v4netmaskbit'] = self.error_class([
                    _("You have to choose IPv4 netmask as well"),
                ])

        # If one field of IPv6 is entered, require the other
        if (ipv6addr or ipv6net) and not ipv6:
            if not ipv6addr and not self._errors.get('int_ipv6address'):
                self._errors['int_ipv6address'] = self.error_class([
                    _("You have to specify IPv6 address as well"),
                ])
            if not ipv6net:
                self._errors['int_v6netmaskbit'] = self.error_class([
                    _("You have to choose IPv6 netmask as well"),
                ])

        if ipv6 and ipv4:
            self._errors['__all__'] = self.error_class([
                _("You have to choose between IPv4 or IPv6"),
            ])

        vip = cdata.get("int_vip")
        if vip and not ipv4addr_b:
            self._errors['int_ipv4address_b'] = self.error_class([
                _("This field is required for failover")
            ])
        if vip and not ipv4addr:
            self._errors['int_ipv4address'] = self.error_class([
                _("This field is required for failover")
            ])

        return cdata
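The same-network check above hinges on IPNetwork.overlaps; a quick illustration with made-up addresses, assuming the same ipaddr library:

from ipaddr import IPNetwork

a = IPNetwork('192.168.1.10/24')
b = IPNetwork('192.168.1.200/24')   # same /24 as a
c = IPNetwork('192.168.2.200/24')   # a different /24
print(a.overlaps(b))  # True  -> passes the validation
print(a.overlaps(c))  # False -> triggers the "must be within the same network" error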
Example #4
    def clean_alias_v4netmaskbit(self):
        vip = self.cleaned_data.get("alias_vip")
        ip = self.cleaned_data.get("alias_v4address")
        nw = self.cleaned_data.get("alias_v4netmaskbit")
        if not nw or not ip:
            return nw
        network = IPNetwork("%s/%s" % (ip, nw))

        if vip:
            if not network.overlaps(IPNetwork("%s/%s" % (vip, nw))):
                raise forms.ValidationError(_("Virtual IP is not in the same network"))

        if self.instance.id and self.instance.alias_interface.int_interface.startswith("carp"):
            return nw
        used_networks = []
        qs = models.Interfaces.objects.all().exclude(int_interface__startswith="carp")
        if self.instance.id:
            qs = qs.exclude(id=self.instance.alias_interface.id)
        elif self.parent.instance.id:
            qs = qs.exclude(id=self.parent.instance.id)
        for iface in qs:
            if iface.int_v4netmaskbit:
                used_networks.append(IPNetwork("%s/%s" % (iface.int_ipv4address, iface.int_v4netmaskbit)))
            for alias in iface.alias_set.all():
                if alias.alias_v4netmaskbit:
                    used_networks.append(IPNetwork("%s/%s" % (alias.alias_v4address, alias.alias_v4netmaskbit)))

        for unet in used_networks:
            if unet.overlaps(network):
                raise forms.ValidationError(_("The network %s is already in use by another NIC.") % (network.masked(),))
        return nw
Example #5
    def clean_int_v4netmaskbit(self):
        ip = self.cleaned_data.get("int_ipv4address")
        nw = self.cleaned_data.get("int_v4netmaskbit")
        if not nw or not ip:
            return nw
        network = IPNetwork('%s/%s' % (ip, nw))
        used_networks = []
        qs = models.Interfaces.objects.all()
        if self.instance.id:
            qs = qs.exclude(id=self.instance.id)
        for iface in qs:
            if iface.int_v4netmaskbit:
                used_networks.append(
                    IPNetwork('%s/%s' % (
                        iface.int_ipv4address,
                        iface.int_v4netmaskbit,
                    ))
                )
            for alias in iface.alias_set.all():
                if alias.alias_v4netmaskbit:
                    used_networks.append(
                        IPNetwork('%s/%s' % (
                            alias.alias_v4address,
                            alias.alias_v4netmaskbit,
                        ))
                    )

        for unet in used_networks:
            if unet.overlaps(network):
                raise forms.ValidationError(
                    _("The network %s is already in use by another NIC.") % (
                        network.masked(),
                    )
                )
        return nw
Example #6
    def clean(self):
        cdata = self.cleaned_data

        ipv4vip = cdata.get("alias_vip")
        ipv4addr = cdata.get("alias_v4address")
        ipv4net = cdata.get("alias_v4netmaskbit")
        ipv6addr = cdata.get("alias_v6address")
        ipv6net = cdata.get("alias_v6netmaskbit")
        ipv4 = True if ipv4addr and ipv4net else False
        ipv6 = True if ipv6addr and ipv6net else False

        # If one field of IPv4 is entered, require the other
        if (ipv4addr or ipv4net) and not ipv4:
            if not ipv4addr and not self._errors.get('alias_v4address'):
                self._errors['alias_v4address'] = self.error_class([
                    _("You have to specify IPv4 address as well per alias"),
                ])
            if not ipv4net and 'alias_v4netmaskbit' not in self._errors:
                self._errors['alias_v4netmaskbit'] = self.error_class([
                    _("You have to choose IPv4 netmask as well per alias"),
                ])

        # If one field of IPv6 is entered, require the other
        if (ipv6addr or ipv6net) and not ipv6:
            if not ipv6addr and not self._errors.get('alias_v6address'):
                self._errors['alias_v6address'] = self.error_class([
                    _("You have to specify IPv6 address as well per alias"),
                ])
            if not ipv6net:
                self._errors['alias_v6netmaskbit'] = self.error_class([
                    _("You have to choose IPv6 netmask as well per alias"),
                ])

        if ipv6 and ipv4:
            self._errors['__all__'] = self.error_class([
                _("You have to choose between IPv4 or IPv6 per alias"),
            ])

        configured_vip = False
        if ipv4vip and hasattr(self, 'parent'):
            iface = self.parent.instance
            ip = IPNetwork('%s/32' % ipv4vip)
            network = IPNetwork('%s/%s' % (
                iface.int_ipv4address,
                iface.int_v4netmaskbit,
            ))
            if ip.overlaps(network):
                configured_vip = True

        if (
            not configured_vip and not ipv6 and not (ipv6addr or ipv6net) and
            not ipv4 and not (ipv4addr or ipv4net)
        ):
            self._errors['__all__'] = self.error_class([
                _("You must specify either an valid IPv4 or IPv6 with maskbit "
                    "per alias"),
            ])

        return cdata
Example #7
    def clean(self):
        cdata = self.cleaned_data

        _n = notifier()
        if not _n.is_freenas() and _n.failover_licensed():
            from freenasUI.failover.models import Failover

            try:
                if Failover.objects.all()[0].disabled is False:
                    self._errors["__all__"] = self.error_class(
                        [_("Failover needs to be disabled to perform network " "changes.")]
                    )
            except:
                log.warn("Failed to verify failover status", exc_info=True)

        ipv4key = "int_ipv4address"
        ipv4addr = cdata.get(ipv4key)
        ipv4addr_b = cdata.get("int_ipv4address_b")
        ipv4net = cdata.get("int_v4netmaskbit")

        if ipv4addr and ipv4addr_b and ipv4net:
            network = IPNetwork("%s/%s" % (ipv4addr, ipv4net))
            if not network.overlaps(IPNetwork("%s/%s" % (ipv4addr_b, ipv4net))):
                self._errors["int_ipv4address_b"] = self.error_class([_("The IP must be within the same network")])

        ipv6addr = cdata.get("int_ipv6address")
        ipv6net = cdata.get("int_v6netmaskbit")
        ipv4 = True if ipv4addr and ipv4net else False
        ipv6 = True if ipv6addr and ipv6net else False

        # If one field of IPv4 is entered, require the other
        if (ipv4addr or ipv4net) and not ipv4:
            if not (ipv4addr or ipv4addr_b) and not self._errors.get(ipv4key):
                self._errors[ipv4key] = self.error_class([_("You have to specify IPv4 address as well")])
            if not ipv4net and "int_v4netmaskbit" not in self._errors:
                self._errors["int_v4netmaskbit"] = self.error_class([_("You have to choose IPv4 netmask as well")])

        # If one field of IPv6 is entered, require the other
        if (ipv6addr or ipv6net) and not ipv6:
            if not ipv6addr and not self._errors.get("int_ipv6address"):
                self._errors["int_ipv6address"] = self.error_class([_("You have to specify IPv6 address as well")])
            if not ipv6net:
                self._errors["int_v6netmaskbit"] = self.error_class([_("You have to choose IPv6 netmask as well")])

        if ipv6 and ipv4:
            self._errors["__all__"] = self.error_class([_("You have to choose between IPv4 or IPv6")])

        vip = cdata.get("int_vip")
        dhcp = cdata.get("int_dhcp")
        if not dhcp:
            if vip and not ipv4addr_b:
                self._errors["int_ipv4address_b"] = self.error_class([_("This field is required for failover")])
            if vip and not ipv4addr:
                self._errors["int_ipv4address"] = self.error_class([_("This field is required for failover")])

        return cdata
Example #8
    def generate_networks_for_template(self, template, ip_nets,
                                       ip_prefixlen):
        """Slice network to subnets for template.

        Generate networks from network template and ip_nets descriptions
        for node groups and value to slice that descriptions. ip_nets is a
        dict with key named as nodegroup and strings values for with
        description of network for that nodegroup in format '127.0.0.1/24'
        to be sliced in pieces for networks. ip_prefixlen - the amount the
        network prefix length should be sliced by. 24 will create networks
        '127.0.0.1/24' from network '127.0.0.1/16'.

        :param template: Yaml template with network assignments on interfaces.
        :param ip_nets: Dict with network descriptions.
        :param ip_prefixlen: Integer for slicing network prefix.
        :return: Data to be used to assign networks to nodes.
        """
        networks_data = []
        nodegroups = self.fuel_web.client.get_nodegroups()
        for nodegroup, section in template['adv_net_template'].items():
            networks = [(n, section['network_assignments'][n]['ep'])
                        for n in section['network_assignments']]
            assert_true(any(n['name'] == nodegroup for n in nodegroups),
                        'Network template contains settings for Node Group '
                        '"{0}", which does not exist!'.format(nodegroup))
            group_id = [n['id'] for n in nodegroups if
                        n['name'] == nodegroup][0]
            ip_network = IPNetwork(ip_nets[nodegroup])
            ip_subnets = ip_network.subnet(
                int(ip_prefixlen) - int(ip_network.prefixlen))
            for network, interface in networks:
                ip_subnet = ip_subnets.pop()
                networks_data.append(
                    {
                        'name': network,
                        'cidr': str(ip_subnet),
                        'group_id': group_id,
                        'interface': interface,
                        'gateway': None,
                        'meta': {
                            "notation": "ip_ranges",
                            "render_type": None,
                            "map_priority": 0,
                            "configurable": True,
                            "unmovable": False,
                            "use_gateway": False,
                            "render_addr_mask": None,
                            'ip_range': [str(ip_subnet[1]), str(ip_subnet[-2])]
                        }
                    }
                )
        return networks_data
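A hedged usage sketch of the slicing the docstring describes, assuming ipaddr's IPNetwork (whose subnet() takes a prefix-length difference and returns a list):

from ipaddr import IPNetwork

ip_network = IPNetwork('10.0.0.0/16')     # assumed nodegroup network
ip_prefixlen = 24
ip_subnets = ip_network.subnet(int(ip_prefixlen) - int(ip_network.prefixlen))
print(ip_subnets[0])                      # 10.0.0.0/24
print(ip_subnets[1])                      # 10.0.1.0/24
ip_subnet = ip_subnets.pop()              # 10.0.255.0/24
print('%s - %s' % (ip_subnet[1], ip_subnet[-2]))  # the ip_range bounds in meta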
Example #9
 def freeIps(self):
     """Number of free Ips left in this network.
     """
     freeips = 0
     try:
         net = IPNetwork(ipunwrap(self.id))
         freeips = int(math.pow(2, net.max_prefixlen - self.netmask) - self.countIpAddresses())
         if self.netmask > net.max_prefixlen:
             return freeips
         return freeips - 2
     except ValueError:
         for net in self.children():
             freeips += net.freeIps()
         return freeips
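The arithmetic above in isolation, as a hedged sketch: a /24 in IPv4's 32-bit space holds 2**(32-24) = 256 addresses, two of which (network and broadcast) are never assignable.

import math
from ipaddr import IPNetwork

net = IPNetwork('10.1.2.0/24')                               # assumed subnet
total = int(math.pow(2, net.max_prefixlen - net.prefixlen))  # 256 addresses
print(total - 2)  # 254 free before subtracting addresses already in use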
Example #10
def calcSubnet(cidr, positional=1):
  net = IPNetwork(cidr)
  try:
    subnet = list(net.subnet(positional))
  except ValueError:
    finalcidr = int(cidr.split('/')[1]) + positional
    print "[%s] is out of range with [%s] positions away (A /%s, seriously?). Cannot calculate." % (cidr, positional, finalcidr)
    raise SystemExit

  newcidr = subnet[0].prefixlen
  count = len(subnet)

  print "[%s] hosts with [/%s] (diff of %s)" % (count, newcidr, positional)
  for i in subnet:
    print "--%s" % i
Example #11
def getNetworksFromRoutes():
    from scapy.all import conf, ltoa, read_routes
    from ipaddr    import IPNetwork, IPAddress

    ## Hide the 'no routes' warnings
    conf.verb = 0

    networks = []
    for nw, nm, gw, iface, addr in read_routes():
        n = IPNetwork( ltoa(nw) )
        (n.netmask, n.gateway, n.ipaddr) = [IPAddress(x) for x in [nm, gw, addr]]
        n.iface = iface
        if not n.compressed in networks:
            networks.append(n)

    return networks
Example #12
    def handle(self, *args, **options):
        if len(args) == 0:
            network, pattern = self.defaultnetwork, self.defaultpattern
        if len(args) == 1:
            network, pattern = IPNetwork(args[0]), self.defaultpattern
        if len(args) == 2:
            network, pattern = IPNetwork(args[0]), args[1]

        for ip in network.iterhosts():
            name = reverse(ip)
            content = pattern % dict(
                [(str(k), v) for k, v in enumerate(str(ip).split('.'))])

            # print "%s\tcreated: %s" % (
            #     Record.objects.get_or_create(domain=self.get_domain(content),
            #         name=content, type='A', defaults={'content': str(ip)}))
            print "%s\t\tcreated: %s" % (
                Record.objects.get_or_create(domain=self.get_domain(name),
                    name=name, type='PTR', defaults={'content': content}))
Example #13
 def generate_networks_for_template(self, template, ip_network,
                                    ip_prefixlen):
     networks_data = []
     nodegroups = self.fuel_web.client.get_nodegroups()
     for nodegroup, section in template['adv_net_template'].items():
         networks = [(n, section['network_assignments'][n]['ep'])
                     for n in section['network_assignments']]
         assert_true(any(n['name'] == nodegroup for n in nodegroups),
                      'Network template contains settings for Node Group '
                      '"{0}", which does not exist!'.format(nodegroup))
         group_id = [n['id'] for n in nodegroups if
                     n['name'] == nodegroup][0]
         ip_network = IPNetwork(ip_network)
         ip_subnets = ip_network.subnet(
             int(ip_prefixlen) - int(ip_network.prefixlen))
         for network, interface in networks:
             ip_subnet = ip_subnets.pop()
             networks_data.append(
                 {
                     'name': network,
                     'cidr': str(ip_subnet),
                     'group_id': group_id,
                     'interface': interface,
                     'gateway': None,
                     'meta': {
                         "notation": "ip_ranges",
                         "render_type": None,
                         "map_priority": 0,
                         "configurable": True,
                         "unmovable": False,
                         "use_gateway": False,
                         "render_addr_mask": None,
                         'ip_range': [str(ip_subnet[1]), str(ip_subnet[-2])]
                     }
                 }
             )
     return networks_data
Example #14
    def clean(self):
        cdata = self.cleaned_data

        _n = notifier()
        if (
            not _n.is_freenas() and _n.failover_licensed() and
            _n.failover_status() != 'SINGLE' and (
                self.instance.id and self.instance.int_vip
            )
        ):
            from freenasUI.failover.models import Failover
            try:
                if Failover.objects.all()[0].disabled is False:
                    self._errors['__all__'] = self.error_class([_(
                        'Failover needs to be disabled to perform network '
                        'changes.'
                    )])
            except:
                log.warn('Failed to verify failover status', exc_info=True)

        ipv4key = 'int_ipv4address'
        ipv4addr = cdata.get(ipv4key)
        ipv4addr_b = cdata.get('int_ipv4address_b')
        ipv4net = cdata.get("int_v4netmaskbit")

        if ipv4addr and ipv4addr_b and ipv4net:
            network = IPNetwork('%s/%s' % (ipv4addr, ipv4net))
            if not network.overlaps(
                IPNetwork('%s/%s' % (ipv4addr_b, ipv4net))
            ):
                self._errors['int_ipv4address_b'] = self.error_class([
                    _('The IP must be within the same network')
                ])

        ipv6addr = cdata.get("int_ipv6address")
        ipv6net = cdata.get("int_v6netmaskbit")
        ipv4 = True if ipv4addr and ipv4net else False
        ipv6 = True if ipv6addr and ipv6net else False

        # If one field of IPv4 is entered, require the other
        if (ipv4addr or ipv4net) and not ipv4:
            if not (ipv4addr or ipv4addr_b) and not self._errors.get(ipv4key):
                self._errors[ipv4key] = self.error_class([
                    _("You have to specify IPv4 address as well"),
                ])
            if not ipv4net and 'int_v4netmaskbit' not in self._errors:
                self._errors['int_v4netmaskbit'] = self.error_class([
                    _("You have to choose IPv4 netmask as well"),
                ])

        # If one field of IPv6 is entered, require the other
        if (ipv6addr or ipv6net) and not ipv6:
            if not ipv6addr and not self._errors.get('int_ipv6address'):
                self._errors['int_ipv6address'] = self.error_class([
                    _("You have to specify IPv6 address as well"),
                ])
            if not ipv6net:
                self._errors['int_v6netmaskbit'] = self.error_class([
                    _("You have to choose IPv6 netmask as well"),
                ])

        vip = cdata.get("int_vip")
        dhcp = cdata.get("int_dhcp")
        if not dhcp:
            if vip and not ipv4addr_b:
                self._errors['int_ipv4address_b'] = self.error_class([
                    _("This field is required for failover")
                ])
            if vip and not ipv4addr:
                self._errors['int_ipv4address'] = self.error_class([
                    _("This field is required for failover")
                ])
        else:
            cdata['int_ipv4address'] = ''
            cdata['int_v4netmaskbit'] = ''

        # API backward compatibility
        options = cdata.get('int_options')
        if options:
            reg = RE_MTU.search(options)
            if reg:
                cdata['int_mtu'] = int(reg.group(1))
                cdata['int_options'] = options.replace(reg.group(0), '')

        return cdata
Example #15
    def save_contexts(self, key, lines):
        """
        Save the provided key and lines as a context
        """

        if not key:
            return
        '''
            IP addresses specified in "network" statements, "ip prefix-lists"
            etc. can differ, in the host part, between what the user provides
            and what the running config displays. For example, the user can
            specify 11.1.1.1/24, and the running config displays this as
            11.1.1.0/24. Ensure we don't do a needless operation for such
            lines. IS-IS & OSPFv3 have no "network" support.
        '''
        re_key_rt = re.match(r'(ip|ipv6)\s+route\s+([A-Fa-f:.0-9/]+)(.*)$',
                             key[0])
        if re_key_rt:
            addr = re_key_rt.group(2)
            if '/' in addr:
                try:
                    newaddr = IPNetwork(addr)
                    key[0] = '%s route %s/%s%s' % (
                        re_key_rt.group(1), newaddr.network, newaddr.prefixlen,
                        re_key_rt.group(3))
                except ValueError:
                    pass

        re_key_rt = re.match(
            r'(ip|ipv6)\s+prefix-list(.*)(permit|deny)\s+([A-Fa-f:.0-9/]+)(.*)$',
            key[0])
        if re_key_rt:
            addr = re_key_rt.group(4)
            if '/' in addr:
                try:
                    newaddr = '%s/%s' % (IPNetwork(addr).network,
                                         IPNetwork(addr).prefixlen)
                except ValueError:
                    newaddr = addr
            else:
                newaddr = addr

            legestr = re_key_rt.group(5)
            re_lege = re.search(r'(.*)le\s+(\d+)\s+ge\s+(\d+)(.*)', legestr)
            if re_lege:
                legestr = '%sge %s le %s%s' % (re_lege.group(
                    1), re_lege.group(3), re_lege.group(2), re_lege.group(4))
            re_lege = re.search(r'(.*)ge\s+(\d+)\s+le\s+(\d+)(.*)', legestr)

            if (re_lege and (
                (re_key_rt.group(1) == "ip" and re_lege.group(3) == "32") or
                (re_key_rt.group(1) == "ipv6" and re_lege.group(3) == "128"))):
                legestr = '%sge %s%s' % (re_lege.group(1), re_lege.group(2),
                                         re_lege.group(4))

            key[0] = '%s prefix-list%s%s %s%s' % (re_key_rt.group(
                1), re_key_rt.group(2), re_key_rt.group(3), newaddr, legestr)

        if lines and key[0].startswith('router bgp'):
            newlines = []
            for line in lines:
                re_net = re.match(r'network\s+([A-Fa-f:.0-9/]+)(.*)$', line)
                if re_net:
                    addr = re_net.group(1)
                    if '/' not in addr and key[0].startswith('router bgp'):
                        # This is most likely an error because with no
                        # prefixlen, BGP treats the prefixlen as 8
                        addr = addr + '/8'

                    try:
                        newaddr = IPNetwork(addr)
                        line = 'network %s/%s %s' % (newaddr.network,
                                                     newaddr.prefixlen,
                                                     re_net.group(2))
                        newlines.append(line)
                    except ValueError:
                        # Really this should be an error. What's a network
                        # without an IP address following it?
                        newlines.append(line)
                else:
                    newlines.append(line)
            lines = newlines
        '''
          More fixups between the user specification and what the running
          config shows: "null0" in routes must be replaced by Null0, and
          "blackhole" must be replaced by Null0 as well.
        '''
        if (key[0].startswith('ip route')
                or key[0].startswith('ipv6 route') and 'null0' in key[0]
                or 'blackhole' in key[0]):
            key[0] = re.sub(r'\s+null0(\s*$)', ' Null0', key[0])
            key[0] = re.sub(r'\s+blackhole(\s*$)', ' Null0', key[0])

        if lines:
            if tuple(key) not in self.contexts:
                ctx = Context(tuple(key), lines)
                self.contexts[tuple(key)] = ctx
            else:
                ctx = self.contexts[tuple(key)]
                ctx.add_lines(lines)

        else:
            if tuple(key) not in self.contexts:
                ctx = Context(tuple(key), [])
                self.contexts[tuple(key)] = ctx
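The normalization the comment block describes, sketched in isolation (assuming ipaddr): IPNetwork keeps the prefix length while .network masks off the host bits, yielding the form the running config displays.

from ipaddr import IPNetwork

newaddr = IPNetwork('11.1.1.1/24')  # the user-specified form
print('%s/%s' % (newaddr.network, newaddr.prefixlen))  # 11.1.1.0/24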
Example #16
 def network(self, key, value):
     if not isinstance(value, _IPAddrBase):
         value = IPNetwork(value)
     self.net = (key, value)
     return self
Example #17
    def cleanformset_nfs_share_path(self, formset, forms):
        dev = None
        valid = True
        ismp = False
        for form in forms:
            if not hasattr(form, "cleaned_data"):
                continue
            path = form.cleaned_data.get("path")
            if not path:
                continue
            parent = os.path.join(path, "..")
            try:
                stat = os.stat(path.encode("utf8"))
                if dev is None:
                    dev = stat.st_dev
                elif dev != stat.st_dev:
                    self._fserrors = self.error_class([
                        _("Paths for a NFS share must reside within the same "
                          "filesystem")
                    ])
                    valid = False
                    break
                if os.stat(parent.encode("utf8")).st_dev != stat.st_dev:
                    ismp = True
                if ismp and len(forms) > 1:
                    self._fserrors = self.error_class([
                        _("You cannot share a mount point and subdirectories "
                          "all at once")
                    ])
                    valid = False
                    break

            except OSError:
                pass

        if not ismp and self.cleaned_data.get('nfs_alldirs'):
            self._errors['nfs_alldirs'] = self.error_class(
                [_("This option can only be used for datasets.")])
            valid = False

        networks = self.cleaned_data.get("nfs_network", "")
        if not networks:
            networks = ['0.0.0.0/0']
        else:
            networks = networks.split(" ")

        qs = models.NFS_Share.objects.all()
        if self.instance.id:
            qs = qs.exclude(id=self.instance.id)

        used_networks = []
        for share in qs:
            try:
                stdev = os.stat(
                    share.paths.all()[0].path.encode("utf8")).st_dev
            except:
                continue
            if share.nfs_network:
                used_networks.extend([(y, stdev)
                                      for y in share.nfs_network.split(" ")])
            else:
                used_networks.append(('0.0.0.0/0', stdev))
            if (self.cleaned_data.get("nfs_alldirs") and share.nfs_alldirs
                    and stdev == dev):
                self._errors['nfs_alldirs'] = self.error_class(
                    [_("This option is only available once per mountpoint")])
                valid = False
                break

        for network in networks:
            networkobj = IPNetwork(network)
            for unetwork, ustdev in used_networks:
                try:
                    unetworkobj = IPNetwork(unetwork)
                except Exception:
                    # If for some reason other values in db are not valid networks
                    unetworkobj = IPNetwork('0.0.0.0/0')
                if networkobj.overlaps(unetworkobj) and dev == ustdev:
                    self._errors['nfs_network'] = self.error_class([
                        _("The network %s is already being shared and cannot "
                          "be used twice for the same filesystem") %
                        (network, )
                    ])
                    valid = False
                    break

        return valid
Example #18
    def clean(self):
        cdata = self.cleaned_data

        _n = notifier()
        if (
            not _n.is_freenas() and _n.failover_licensed() and
            _n.failover_status() != 'SINGLE' and (
                self.instance.id and self.instance.int_vip
            )
        ):
            from freenasUI.failover.models import Failover
            try:
                if Failover.objects.all()[0].disabled is False:
                    self._errors['__all__'] = self.error_class([_(
                        'Failover needs to be disabled to perform network '
                        'changes.'
                    )])
            except:
                log.warn('Failed to verify failover status', exc_info=True)

        ipv4key = 'int_ipv4address'
        ipv4addr = cdata.get(ipv4key)
        ipv4addr_b = cdata.get('int_ipv4address_b')
        ipv4net = cdata.get("int_v4netmaskbit")

        if ipv4addr and ipv4addr_b and ipv4net:
            network = IPNetwork('%s/%s' % (ipv4addr, ipv4net))
            if not network.overlaps(
                IPNetwork('%s/%s' % (ipv4addr_b, ipv4net))
            ):
                self._errors['int_ipv4address_b'] = self.error_class([
                    _('The IP must be within the same network')
                ])

        ipv6addr = cdata.get("int_ipv6address")
        ipv6net = cdata.get("int_v6netmaskbit")
        ipv4 = True if ipv4addr and ipv4net else False
        ipv6 = True if ipv6addr and ipv6net else False

        # If one field of IPv4 is entered, require the other
        if (ipv4addr or ipv4net) and not ipv4:
            if not (ipv4addr or ipv4addr_b) and not self._errors.get(ipv4key):
                self._errors[ipv4key] = self.error_class([
                    _("You have to specify IPv4 address as well"),
                ])
            if not ipv4net and 'int_v4netmaskbit' not in self._errors:
                self._errors['int_v4netmaskbit'] = self.error_class([
                    _("You have to choose IPv4 netmask as well"),
                ])

        # If one field of IPv6 is entered, require the other
        if (ipv6addr or ipv6net) and not ipv6:
            if not ipv6addr and not self._errors.get('int_ipv6address'):
                self._errors['int_ipv6address'] = self.error_class([
                    _("You have to specify IPv6 address as well"),
                ])
            if not ipv6net:
                self._errors['int_v6netmaskbit'] = self.error_class([
                    _("You have to choose IPv6 netmask as well"),
                ])

        vip = cdata.get("int_vip")
        dhcp = cdata.get("int_dhcp")
        if not dhcp:
            if vip and not ipv4addr_b:
                self._errors['int_ipv4address_b'] = self.error_class([
                    _("This field is required for failover")
                ])
            if vip and not ipv4addr:
                self._errors['int_ipv4address'] = self.error_class([
                    _("This field is required for failover")
                ])

        return cdata
Example #19
 def public_network(self):
     return str(
         IPNetwork(self.environment().network_by_name('public').ip_network))
Example #20
def render(self, **kwargs):
    policy = self.data
    afi = kwargs['afi']
    config_blob = []

    def afi_match(host):
        if host == "any":
            return True
        elif IPNetwork(host).version == afi:
            return True
        else:
            return False

    for rule in policy:
        rule = rule[0]
        s_hosts = rule['source']['l3']['ip']
        d_hosts = rule['destination']['l3']['ip']
        stateful = rule['keywords']['state']
        logging = rule['keywords']['log']

        # deal with ICMP
        if "icmp" in rule['protocol']:
            policy = rule['protocol']['icmp']
            # FIXME this should happen in render or aclsemantics
            if not isinstance(policy, Closure):
                policy = [policy]

            # cycle through all ICMP related elements in the AST
            for entry in policy:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        extended = "extended " if afi == 4 else ""
                        yes_v6 = "ipv6 " if afi == 6 else ""
                        line = "%saccess-list %s %s%s icmp" \
                            % (yes_v6, self.name + "-v%s" % afi,
                               extended, action)

                        if s_host == u'any':
                            line += " any"
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += " host %s" % s_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += " %s" % IPNetwork(s_host).with_netmask.replace('/', ' ')
                        else:
                            line += " " + s_host

                        if d_host == u'any':
                            line += " any"
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += " host %s" % d_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += " %s" % IPNetwork(d_host).with_netmask.replace('/', ' ')
                        else:
                            line += " " + d_host

                        if not entry['icmp_type'] == "any":
                            line += " " + str(entry['icmp_type'])

                        if logging:
                            line += " log"

                        if line not in config_blob:
                            config_blob.append(line)
            # jump out of the loop because we have nothing to do with
            # L4 when doing ICMP
            continue

        # layer 3 and 4
        s_ports = rule['source']['l4']['ports']
        d_ports = rule['destination']['l4']['ports']

        for s_port in s_ports:
            for d_port in d_ports:
                for s_host in s_hosts:
                    if not afi_match(s_host):
                        continue
                    for d_host in d_hosts:
                        if not afi_match(d_host):
                            continue
                        extended = "extended " if afi == 4 else ""
                        yes_v6 = "ipv6 " if afi == 6 else ""
                        line = "%saccess-list %s %s" \
                            % (yes_v6, self.name + "-v%s" % afi, extended)
                        if rule['action'] == "allow":
                            action = "permit"
                        else:
                            action = "deny"
                        line += action
                        if rule['protocol'] == "any":
                            line += " ip"
                        else:
                            line += " " + rule['protocol']

                        if s_host == u'any':
                            line += " any"
                        elif IPNetwork(s_host).prefixlen in [32, 128]:
                            line += " host %s" % s_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += " %s" % IPNetwork(s_host).with_netmask.replace('/', ' ')
                        else:
                            line += " " + s_host

                        if type(s_port) == list:
                            s_port = s_port[0]

                        if type(s_port) == tuple:
                            line += " range %s %s" % (s_port[0], s_port[1])
                        elif not s_port == "any":
                            line += " eq %s" % str(s_port)

                        if d_host == u'any':
                            line += " any"
                        elif IPNetwork(d_host).prefixlen in [32, 128]:
                            line += " host %s" % d_host.split('/')[0]
                        # IPv4 must be with netmask, IPv6 in CIDR notation
                        elif afi == 4:
                            line += " %s" % IPNetwork(d_host).with_netmask.replace('/', ' ')
                        else:
                            line += " " + d_host

                        if type(d_port) == list:
                            d_port = d_port[0]

                        if type(d_port) == tuple:
                            line += " range %s %s" % (d_port[0], d_port[1])
                        elif not d_port == "any":
                            line += " eq %s" % str(d_port)

                        if stateful and rule['protocol'] == "tcp":
                            line += " established"

                        if logging:
                            line += " log"

                        if line not in config_blob:
                            config_blob.append(line)

    # add final deny any any at the end of each policy
    extended = "extended " if afi == 4 else ""
    yes_v6 = "ipv6 " if afi == 6 else ""
    line = "%saccess-list %s %sdeny ip any any" \
        % (yes_v6, self.name + "-v%s" % afi, extended)
    config_blob.append(line)
    return config_blob
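A hedged sketch of the three address-formatting branches above, assuming ipaddr: host entries, IPv4 network/netmask pairs, and IPv6 CIDR notation.

from ipaddr import IPNetwork

print(IPNetwork('10.0.0.1/32').prefixlen in [32, 128])          # True -> "host 10.0.0.1"
print(IPNetwork('10.0.0.0/24').with_netmask.replace('/', ' '))  # '10.0.0.0 255.255.255.0'
print(IPNetwork('2001:db8::/32').version)                       # 6 -> left in CIDR notation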
Example #21
def create_app(config_name):
    app = Flask(__name__, static_url_path='')
    # This first loads the configuration from e.g. config['development'], which corresponds to the DevelopmentConfig class in config.py
    app.config.from_object(config[config_name])
    # Then you can override the values with the contents of the file the OPENTARGETS_API_LOCAL_SETTINGS environment variable points to.
    # For example:
    # $ export OPENTARGETS_API_LOCAL_SETTINGS=/path/to/settings.cfg
    #
    # where settings.cfg looks like:
    #
    # DEBUG = False
    # SECRET_KEY = 'foo'
    #
    app.config.from_envvar("OPENTARGETS_API_LOCAL_SETTINGS", silent=True)

    config[config_name].init_app(app)
    api_version = app.config['API_VERSION']
    api_version_minor = app.config['API_VERSION_MINOR']


    app.logger.info('looking for elasticsearch at: %s' % app.config['ELASTICSEARCH_URL'])


    app.extensions['redis-core'] = Redis(app.config['REDIS_SERVER_PATH'], db=0)  # served data
    app.extensions['redis-service'] = Redis(app.config['REDIS_SERVER_PATH'], db=1)  # cache, rate limit and internal things
    app.extensions['redis-user'] = Redis(app.config['REDIS_SERVER_PATH'], db=2)  # user info
    '''setup cache'''
    app.extensions['redis-service'].config_set('save','')
    app.extensions['redis-service'].config_set('appendonly', 'no')
    icache = InternalCache(app.extensions['redis-service'],
                           str(api_version_minor))
    ip2org = IP2Org(icache)
    if app.config['ELASTICSEARCH_URL']:
        es = Elasticsearch(app.config['ELASTICSEARCH_URL'],
                           # # sniff before doing anything
                           # sniff_on_start=True,
                           # # refresh nodes after a node fails to respond
                           # sniff_on_connection_fail=True,
                           # # and also every 60 seconds
                           # sniffer_timeout=60
                           timeout=60 * 20,
                           maxsize=32,
                           )
    else:
        es = None
    '''elasticsearch handlers'''
    app.extensions['esquery'] = esQuery(es,
                                        DataTypes(app),
                                        DataSourceScoring(app),
                                        index_data=app.config['ELASTICSEARCH_DATA_INDEX_NAME'],
                                        index_efo=app.config['ELASTICSEARCH_EFO_LABEL_INDEX_NAME'],
                                        index_eco=app.config['ELASTICSEARCH_ECO_INDEX_NAME'],
                                        index_genename=app.config['ELASTICSEARCH_GENE_NAME_INDEX_NAME'],
                                        index_expression=app.config['ELASTICSEARCH_EXPRESSION_INDEX_NAME'],
                                        index_reactome=app.config['ELASTICSEARCH_REACTOME_INDEX_NAME'],
                                        index_association=app.config['ELASTICSEARCH_DATA_ASSOCIATION_INDEX_NAME'],
                                        index_search=app.config['ELASTICSEARCH_DATA_SEARCH_INDEX_NAME'],
                                        index_relation=app.config['ELASTICSEARCH_DATA_RELATION_INDEX_NAME'],
                                        docname_data=app.config['ELASTICSEARCH_DATA_DOC_NAME'],
                                        docname_efo=app.config['ELASTICSEARCH_EFO_LABEL_DOC_NAME'],
                                        docname_eco=app.config['ELASTICSEARCH_ECO_DOC_NAME'],
                                        docname_genename=app.config['ELASTICSEARCH_GENE_NAME_DOC_NAME'],
                                        docname_expression=app.config['ELASTICSEARCH_EXPRESSION_DOC_NAME'],
                                        docname_reactome=app.config['ELASTICSEARCH_REACTOME_REACTION_DOC_NAME'],
                                        docname_association=app.config['ELASTICSEARCH_DATA_ASSOCIATION_DOC_NAME'],
                                        docname_search=app.config['ELASTICSEARCH_DATA_SEARCH_DOC_NAME'],
                                        # docname_search_target=app.config['ELASTICSEARCH_DATA_SEARCH_TARGET_DOC_NAME'],
                                        # docname_search_disease=app.config['ELASTICSEARCH_DATA_SEARCH_DISEASE_DOC_NAME'],
                                        docname_relation=app.config['ELASTICSEARCH_DATA_RELATION_DOC_NAME'],
                                        log_level=app.logger.getEffectiveLevel(),
                                        cache=icache
                                        )

    app.extensions['es_access_store'] = esStore(es,
                                        eventlog_index=app.config['ELASTICSEARCH_LOG_EVENT_INDEX_NAME'],
                                        ip2org=ip2org,
                                        )
    '''mixpanel handlers'''
    if Config.MIXPANEL_TOKEN:
        mp = Mixpanel(Config.MIXPANEL_TOKEN, consumer=AsyncBufferedConsumer())
        app.extensions['mixpanel']= mp
        app.extensions['mp_access_store'] = MixPanelStore(mp,
                                            ip2org=ip2org,
                                            )


        app.extensions['proxy'] = ProxyHandler(allowed_targets=app.config['PROXY_SETTINGS']['allowed_targets'],
                                               allowed_domains=app.config['PROXY_SETTINGS']['allowed_domains'],
                                               allowed_request_domains=app.config['PROXY_SETTINGS']['allowed_request_domains'])

    # basepath = app.config['PUBLIC_API_BASE_PATH']+api_version
    # cors = CORS(app, resources=r'/api/*', allow_headers='Content-Type,Auth-Token')

    ''' define cache'''
    # cache = Cache(config={'CACHE_TYPE': 'simple'})
    # cache.init_app(latest_blueprint)
    # latest_blueprint.cache = cache
    # latest_blueprint.extensions['cache'] = cache
    # app.cache = SimpleCache()
    app.cache = FileSystemCache('/tmp/cttv-rest-api-cache', threshold=100000, default_timeout=60*60, mode=777)

    '''Set usage limiter '''
    # limiter = Limiter(global_limits=["2000 per hour", "20 per second"])
    # limiter.init_app(app)# use redis to store limits

    '''Load api keys in redis'''
    rate_limit_file = app.config['USAGE_LIMIT_PATH']
    if not os.path.exists(rate_limit_file):
        rate_limit_file = '../'+rate_limit_file
    csvfile = None
    if Config.GITHUB_AUTH_TOKEN:
        r = requests.get('https://api.github.com/repos/opentargets/rest_api_auth/contents/rate_limit.csv',
                         headers = {'Authorization': 'token %s'%Config.GITHUB_AUTH_TOKEN,
                                    'Accept': 'application/vnd.github.v3.raw'})
        if r.ok:
            csvfile = r.text.split('\n')
            app.logger.info('Retrieved rate limit file from github remote')
        else:
            app.logger.warning('Cannot retrieve rate limit file from remote, SKIPPED!')
    elif os.path.exists(rate_limit_file):
        csvfile = open(rate_limit_file)
        app.logger.info('Using dummy rate limit file')

    if csvfile is None:
        app.logger.error('cannot find rate limit file: %s. RATE LIMIT QUOTA LOAD SKIPPED!'%rate_limit_file)
    else:
        reader = csv.DictReader(csvfile)
        for row in reader:
            auth_key = AuthKey(**row)
            app.extensions['redis-user'].hmset(auth_key.get_key(), auth_key.__dict__)
        try:
            csvfile.close()
        except:
            pass
        app.logger.info('successfully loaded rate limit file')


    '''load ip name resolution'''
    ip_resolver = defaultdict(lambda: "PUBLIC")
    ip_list_file = app.config['IP_RESOLVER_LIST_PATH']
    if not os.path.exists(ip_list_file):
        ip_list_file = '../' + ip_list_file
    if os.path.exists(ip_list_file):
        with open(ip_list_file) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                net = IPNetwork(row['ip'])
                ip_resolver[net] = row['org']
    else:
        app.logger.warning('cannot find IP list for IP resolver. All traffic will be logged as PUBLIC')
    app.config['IP_RESOLVER'] = ip_resolver



    '''compress http response'''
    compress = Compress()
    compress.init_app(app)

    latest_blueprint = Blueprint('latest', __name__)
    current_version_blueprint = Blueprint(str(api_version), __name__)
    current_minor_version_blueprint = Blueprint(str(api_version_minor), __name__)


    specpath = '/cttv'

    if app.config['PROFILE'] == True:
        from werkzeug.contrib.profiler import ProfilerMiddleware
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])


    '''set the right prefixes'''

    create_api(latest_blueprint, api_version, specpath)
    create_api(current_version_blueprint, api_version, specpath)
    create_api(current_minor_version_blueprint, api_version_minor, specpath)

    # app.register_blueprint(latest_blueprint, url_prefix='/latest/platform')
    app.register_blueprint(current_version_blueprint, url_prefix='/v'+str(api_version) + '/platform')
    app.register_blueprint(current_minor_version_blueprint, url_prefix='/v'+str(api_version_minor) + '/platform')


    '''serve the static docs'''
    
    try:
        '''
        NOTE: this file gets created only at deployment time
        '''
        openapi_def = yaml.load(file('app/static/openapi.yaml', 'r'))
        app.logger.info('parsing swagger from static/openapi.yaml')

    except IOError:
        '''if we are not deployed, then simply use the template'''
        openapi_def = yaml.load(file('openapi.template.yaml', 'r'))
        app.logger.error('parsing swagger from openapi.template.yaml')

    with open("api-description.md", "r") as f:
        desc = f.read()
    openapi_def['info']['description'] = desc
    openapi_def['basePath'] = '/v%s' % str(api_version)
    @app.route('/v%s/platform/swagger' % str(api_version))
    def serve_swagger(apiversion=api_version):
        return jsonify(openapi_def)


    @app.route('/v%s/platform/docs' % str(api_version))
    def render_redoc(apiversion=api_version):
        return render_template('docs.html',api_version=apiversion)


    '''pre and post-request'''


    @app.before_request
    def before_request():
        g.request_start = datetime.now()
    @app.after_request
    def after(resp):
        try:
            rate_limiter = RateLimiter()
            now = datetime.now()
            took = (now - g.request_start).total_seconds()*1000
            if took > 500:
                cache_time = str(int(3600*took))  # set cache to last one hour for each second spent in the request
                resp.headers.add('X-Accel-Expires', cache_time)
            took = int(round(took))
            LogApiCallWeight(took)
            # if took < RateLimiter.DEFAULT_CALL_WEIGHT:
            #     took = RateLimiter.DEFAULT_CALL_WEIGHT
            current_values = increment_call_rate(took,rate_limiter)
            now = datetime.now()
            ceil10s=round(ceil_dt_to_future_time(now, 10),2)
            ceil1h=round(ceil_dt_to_future_time(now, 3600),2)
            usage_left_10s = rate_limiter.short_window_rate-current_values['short']
            usage_left_1h = rate_limiter.long_window_rate - current_values['long']
            min_ceil = ceil10s
            if usage_left_1h <0:
                min_ceil = ceil1h
            if (usage_left_10s < 0) or (usage_left_1h <0):
                resp.headers.add('Retry-After', min_ceil)
            resp.headers.add('X-API-Took', took)
            resp.headers.add('X-Usage-Limit-10s', rate_limiter.short_window_rate)
            resp.headers.add('X-Usage-Limit-1h', rate_limiter.long_window_rate)
            resp.headers.add('X-Usage-Remaining-10s', usage_left_10s)
            resp.headers.add('X-Usage-Remaining-1h', usage_left_1h)
            # resp.headers.add('X-Usage-Limit-Reset-10s', ceil10s)
            # resp.headers.add('X-Usage-Limit-Reset-1h', ceil1h)
            resp.headers.add('Access-Control-Allow-Origin', '*')
            resp.headers.add('Access-Control-Allow-Headers','Content-Type,Auth-Token')
            resp.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
            if do_not_cache(request):# do not cache in the browser
                resp.headers.add('Cache-Control', "no-cache, must-revalidate, max-age=0")
            else:
                resp.headers.add('Cache-Control', "no-transform, public, max-age=%i, s-maxage=%i"%(took*1800/1000, took*9000/1000))
            return resp

        except Exception as e:
            app.logger.exception('failed request teardown function: %s', str(e))
            return resp



    # Override the HTTP exception handler.
    app.handle_http_exception = get_http_exception_handler(app)
    return app
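The IP_RESOLVER built above maps IPNetwork objects to organisation names, defaulting to "PUBLIC"; a hedged sketch of how a lookup might walk that mapping (resolve_org is a hypothetical helper, not part of the app):

from collections import defaultdict
from ipaddr import IPAddress, IPNetwork

def resolve_org(ip_resolver, address):
    # Hypothetical helper: linear scan of the network -> org mapping.
    addr = IPAddress(address)
    for net, org in ip_resolver.items():
        if addr in net:
            return org
    return ip_resolver.default_factory()  # falls back to "PUBLIC"

ip_resolver = defaultdict(lambda: "PUBLIC")
ip_resolver[IPNetwork('10.0.0.0/8')] = 'internal'   # made-up row
print(resolve_org(ip_resolver, '10.1.2.3'))         # internal
print(resolve_org(ip_resolver, '8.8.8.8'))          # PUBLIC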
Example #22
 def get_net_mask(self, net_name):
     return str(
         IPNetwork(self.get_virtual_environment().network_by_name(
             net_name).ip_network).netmask)
Example #23
 def internal_router(self):
     return str(
         IPNetwork(
             self.environment().network_by_name('internal').ip_network)[1])
Example #24
 def fixed_network(self):
     return str(
         IPNetwork(self.environment().network_by_name(
             'private').ip_network).subnet(new_prefix=27)[0])
Example #25
 def public_virtual_ip(self):
     return str(
         IPNetwork(self.environment().network_by_name(
             'public').ip_network).subnet(new_prefix=29)[-2][-1])
Example #26
 def floating_network(self):
     return str(
         IPNetwork(self.environment().network_by_name(
             'public').ip_network).subnet(new_prefix=29)[-1])
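How these subnet(new_prefix=...) expressions carve up the public network, as a hedged sketch with an assumed /24:

from ipaddr import IPNetwork

pub = IPNetwork('172.18.0.0/24')          # assumed public network
print(pub.subnet(new_prefix=29)[-1])      # 172.18.0.248/29 -- the floating network
print(pub.subnet(new_prefix=29)[-2][-1])  # 172.18.0.247 -- the public virtual IP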
Example #27
argparser.add_argument('--peer-as', metavar='ASNUM', dest='peer_as', default='65500', help='BGP peer AS number (default: 65500)')
argparser.add_argument('--local-cidr', metavar='CIDR', required=True, dest='local_cidr', help='BGP sessions source addresses block (no default)')
argparser.add_argument('--local-as', metavar='ASNUM', dest='local_as', default='65501', help='BGP local AS number (default: 65501)')
argparser.add_argument('--local-interface', metavar='INTERFACE', dest='local_interface', help='local interface used for BGP sessions source addresses (default: none)')
argparser.add_argument('--announce-cidr', metavar='CIDR', dest='announce_cidr', help='BGP /32 announces block (default: none)')
argparser.add_argument('--sessions', metavar='COUNT', dest='sessions', default=1, help='number of BGP sessions to establish (default: 1)')
args = argparser.parse_args()

# validate command-line options
try:
    peer_ip = IPAddress(args.peer_ip)
except:
    print 'invalid peer address "%s" - aborting' % args.peer_ip
    sys.exit(1)
try:
    local_cidr = IPNetwork(args.local_cidr)
except:
    print 'invalid local addresses block "%s" - aborting' % args.local_cidr
    sys.exit(1)
if args.announce_cidr:
    try:
        announce_cidr = IPNetwork(args.announce_cidr)
        announce_cidr = announce_cidr.iterhosts()
    except:
        print 'invalid announce addresses block %s - aborting' % args.announce_cidr
        sys.exit(1)

# create ExaBGP configuration + setup address aliases if required
configuration = 'group peers {\n'
sessions      = 0
for address in local_cidr.iterhosts():
Example #28
def netmask(self):
    return IPNetwork(self.ip_network).netmask
Example #29
def ip_pool_end(self):
    return IPNetwork(self.ip_network)[-2]
Example #30
def internal_net_mask(self):
    return str(
        IPNetwork(self.environment().network_by_name(
            'internal').ip_network).netmask)
Example #31
def ip_pool_start(self):
    return IPNetwork(self.ip_network)[2]
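
netmask, ip_pool_end and ip_pool_start above all index into an IPNetwork; a
quick standalone illustration of those semantics (192.168.0.0/24 is an
illustrative value):

from ipaddr import IPNetwork

net = IPNetwork('192.168.0.0/24')
print net.netmask   # 255.255.255.0
print net[1]        # 192.168.0.1   - conventional router address, cf. internal_router above
print net[2]        # 192.168.0.2   - pool start (skips network address and router)
print net[-2]       # 192.168.0.254 - pool end (last address before broadcast)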
Example #32
def _check_subnet(self, address):
    ip = IPAddress(address)
    for subnet in self.subnet_set.values_list('subnet', flat=True):
        if ip in IPNetwork(subnet):
            return True
    return False
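
A minimal standalone version of the membership test used by _check_subnet
(the subnet list is hypothetical and stands in for self.subnet_set):

from ipaddr import IPAddress, IPNetwork

subnets = ['10.0.0.0/24', '192.168.1.0/28']

def check_subnet(address):
    ip = IPAddress(address)
    return any(ip in IPNetwork(s) for s in subnets)

print check_subnet('10.0.0.42')   # True
print check_subnet('172.16.0.1')  # False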
Example #33
def main():
    """
    Main routine
    :return: None
    """
    # Login to APIC
    description = ('Simple application that logs on to the APIC'
                   ' and displays all of the External Subnets.')
    creds = aci.Credentials('apic', description)
    creds.add_argument('-f', '--find_ip', help='IP address to search for')
    args = creds.get()

    session = aci.Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        print('%% Could not login to APIC')

    if not args.find_ip:
        print "Error: -f|--find_ip <ip_address> argument required"
        sys.exit(1)

    print "searching for " + args.find_ip
    # Download all of the tenants, app profiles, and Subnets
    # and store the names as tuples in two lists
    priv = []
    publ = []
    ip = args.find_ip
    tenants = aci.Tenant.get_deep(session, limit_to=['fvTenant',
                                                     'fvSubnet',
                                                     'l3extOut',
                                                     'l3extInstP',
                                                     'l3extSubnet'])

    for tenant in tenants:
        apps = aci.AppProfile.get(session, tenant)
        for app in apps:
            bds = aci.BridgeDomain.get(session, tenant)
            for bd in bds:
                subnets = aci.Subnet.get(session, bd, tenant)
                for subnet in subnets:
                    net = IPNetwork(subnet.addr)
                    if net.Contains(IPNetwork(ip)):
                        priv.append((tenant.name, app.name, bd.name,
                                     subnet.addr, subnet.get_scope()))

    for tenant in tenants:
        outside_l3s = tenant.get_children(only_class=aci.OutsideL3)
        for outside_l3 in outside_l3s:
            outside_epgs = outside_l3.get_children(only_class=aci.OutsideEPG)
            for outside_epg in outside_epgs:
                outside_networks = outside_epg.get_children(only_class=aci.OutsideNetwork)
                for outside_network in outside_networks:
                    net = IPNetwork(outside_network.addr)
                    if net.Contains(IPNetwork(ip)):
                        publ.append((tenant.name,
                                     outside_l3.name,
                                     outside_epg.name,
                                     outside_network.addr,
                                     outside_network.get_scope()))

    # Display
    template = "{0:20} {1:20} {2:20} {3:18} {4:15}"
    if len(priv):
        print ""
        print(template.format("Tenant",
                              "App",
                              "Bridge Domain",
                              "Subnet",
                              "Scope"))
        print(template.format("-" * 20,
                              "-" * 20,
                              "-" * 20,
                              "-" * 18,
                              "-" * 15))
        for rec in priv:
            print(template.format(*rec))
    if len(publ):
        print ""
        print(template.format("Tenant",
                              "OutsideL3",
                              "OutsideEPG",
                              "Subnet",
                              "Scope"))
        print(template.format("-" * 20,
                              "-" * 20,
                              "-" * 20,
                              "-" * 18,
                              "-" * 15))
        for rec in publ:
            print(template.format(*rec))
Example #34
import re
import urllib2
import zlib

import sentry

from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from ipaddr import IPNetwork
from urlparse import urlparse

CHARSET_RE = re.compile(r'charset=(\S+)')

DEFAULT_ENCODING = 'utf-8'

DEFAULT_HEADERS = (('Accept-Encoding', 'gzip'), )

DEFAULT_USER_AGENT = 'sentry/%s' % sentry.VERSION

DISALLOWED_IPS = set((IPNetwork(i) for i in settings.SENTRY_DISALLOWED_IPS))


class NoRedirectionHandler(urllib2.HTTPErrorProcessor):
    def http_response(self, request, response):
        return response

    https_response = http_response


def is_valid_url(url):
    """
    Tests a URL to ensure it doesn't appear to be a blacklisted IP range.
    """
    parsed = urlparse(url)
    if not parsed.hostname:
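
The excerpt above is cut off by the source page. A minimal sketch of how such a
blacklist check typically completes, assuming the hostname is resolved with
socket.gethostbyname (the resolution step and the helper name are assumptions,
not the original code):

import socket
from ipaddr import IPAddress

def is_valid_url_sketch(url):
    parsed = urlparse(url)
    if not parsed.hostname:
        return False
    # resolve the hostname and test it against the disallowed networks (assumed logic)
    ip = IPAddress(socket.gethostbyname(parsed.hostname))
    return not any(ip in net for net in DISALLOWED_IPS)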
Example #35
def create_app(config_name):
    app = Flask(__name__, static_url_path='')
    # This first loads the configuration from e.g. config['development'], which corresponds to the DevelopmentConfig class in config.py
    app.config.from_object(config[config_name])
    # Then you can override the values with the contents of the file the OPENTARGETS_API_LOCAL_SETTINGS environment variable points to.
    # For example:
    # $ export OPENTARGETS_API_LOCAL_SETTINGS=/path/to/settings.cfg
    #
    # where settings.cfg looks like:
    #
    # DEBUG = False
    # SECRET_KEY = 'foo'
    #
    app.config.from_envvar("OPENTARGETS_API_LOCAL_SETTINGS", silent=True)

    config[config_name].init_app(app)
    api_version = app.config['API_VERSION']
    api_version_minor = app.config['API_VERSION_MINOR']


    app.logger.info('looking for elasticsearch at: %s' % app.config['ELASTICSEARCH_URL'])


    app.extensions['redis-core'] = Redis(app.config['REDIS_SERVER_PATH'], db=0) #served data
    app.extensions['redis-service'] = Redis(app.config['REDIS_SERVER_PATH'], db=1) #cache, rate limit and internal things
    app.extensions['redis-user'] = Redis(app.config['REDIS_SERVER_PATH'], db=2)# user info
    '''setup cache'''
    app.extensions['redis-service'].config_set('save','')
    app.extensions['redis-service'].config_set('appendonly', 'no')
    icache = InternalCache(app.extensions['redis-service'],
                           str(api_version_minor))
    ip2org = IP2Org(icache)
    if app.config['ELASTICSEARCH_URL']:
        es = Elasticsearch(app.config['ELASTICSEARCH_URL'],
                           # # sniff before doing anything
                           # sniff_on_start=True,
                           # # refresh nodes after a node fails to respond
                           # sniff_on_connection_fail=True,
                           # # and also every 60 seconds
                           # sniffer_timeout=60
                           timeout=60 * 20,
                           maxsize=32,
                           )
    else:
        es = None
    '''elasticsearch handlers'''
    app.extensions['esquery'] = esQuery(
        es,
        DataTypes(app),
        DataSourceScoring(app),
        index_data=app.config['ELASTICSEARCH_DATA_INDEX_NAME'],
        index_drug=app.config['ELASTICSEARCH_DRUG_INDEX_NAME'],
        index_efo=app.config['ELASTICSEARCH_EFO_LABEL_INDEX_NAME'],
        index_eco=app.config['ELASTICSEARCH_ECO_INDEX_NAME'],
        index_genename=app.config['ELASTICSEARCH_GENE_NAME_INDEX_NAME'],
        index_expression=app.config['ELASTICSEARCH_EXPRESSION_INDEX_NAME'],
        index_reactome=app.config['ELASTICSEARCH_REACTOME_INDEX_NAME'],
        index_association=app.config['ELASTICSEARCH_DATA_ASSOCIATION_INDEX_NAME'],
        index_search=app.config['ELASTICSEARCH_DATA_SEARCH_INDEX_NAME'],
        index_relation=app.config['ELASTICSEARCH_DATA_RELATION_INDEX_NAME'],
        docname_data=app.config['ELASTICSEARCH_DATA_DOC_NAME'],
        docname_drug=app.config['ELASTICSEARCH_DRUG_DOC_NAME'],
        docname_efo=app.config['ELASTICSEARCH_EFO_LABEL_DOC_NAME'],
        docname_eco=app.config['ELASTICSEARCH_ECO_DOC_NAME'],
        docname_genename=app.config['ELASTICSEARCH_GENE_NAME_DOC_NAME'],
        docname_expression=app.config['ELASTICSEARCH_EXPRESSION_DOC_NAME'],
        docname_reactome=app.config['ELASTICSEARCH_REACTOME_REACTION_DOC_NAME'],
        docname_association=app.config['ELASTICSEARCH_DATA_ASSOCIATION_DOC_NAME'],
        docname_search=app.config['ELASTICSEARCH_DATA_SEARCH_DOC_NAME'],
        # docname_search_target=app.config['ELASTICSEARCH_DATA_SEARCH_TARGET_DOC_NAME'],
        # docname_search_disease=app.config['ELASTICSEARCH_DATA_SEARCH_DISEASE_DOC_NAME'],
        docname_relation=app.config['ELASTICSEARCH_DATA_RELATION_DOC_NAME'],
        log_level=app.logger.getEffectiveLevel(),
        cache=icache
        )

    app.extensions['es_access_store'] = esStore(es,
        eventlog_index=app.config['ELASTICSEARCH_LOG_EVENT_INDEX_NAME'],
        ip2org=ip2org,
        )

    '''mixpanel handlers'''
    if Config.MIXPANEL_TOKEN:
        mp = Mixpanel(Config.MIXPANEL_TOKEN, consumer=AsyncBufferedConsumer())
        app.extensions['mixpanel']= mp
        app.extensions['mp_access_store'] = MixPanelStore(
            mp,
            ip2org=ip2org,
            )


        app.extensions['proxy'] = ProxyHandler(
            allowed_targets=app.config['PROXY_SETTINGS']['allowed_targets'],
            allowed_domains=app.config['PROXY_SETTINGS']['allowed_domains'],
            allowed_request_domains=app.config['PROXY_SETTINGS']['allowed_request_domains'])

    # basepath = app.config['PUBLIC_API_BASE_PATH']+api_version
    # cors = CORS(app, resources=r'/api/*', allow_headers='Content-Type,Auth-Token')

    ''' define cache'''
    # cache = Cache(config={'CACHE_TYPE': 'simple'})
    # cache.init_app(latest_blueprint)
    # latest_blueprint.cache = cache
    # latest_blueprint.extensions['cache'] = cache
    # app.cache = SimpleCache()
    app.cache = FileSystemCache('/tmp/cttv-rest-api-cache', threshold=100000, default_timeout=60 * 60, mode=0777)

    '''load ip name resolution'''
    ip_resolver = defaultdict(lambda: "PUBLIC")
    ip_list_file = app.config['IP_RESOLVER_LIST_PATH']
    if not os.path.exists(ip_list_file):
        ip_list_file = '../' + ip_list_file
    if os.path.exists(ip_list_file):
        with open(ip_list_file) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                net = IPNetwork(row['ip'])
                ip_resolver[net] = row['org']
    else:
        app.logger.warning('cannot find IP list for IP resolver. All traffic will be logged as PUBLIC')
    app.config['IP_RESOLVER'] = ip_resolver
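
    # The resolver file is a plain CSV with 'ip' and 'org' columns (the column
    # names follow from the DictReader usage above); a hypothetical example:
    #   ip,org
    #   10.0.0.0/8,INTERNAL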



    '''compress http response'''
    compress = Compress()
    compress.init_app(app)

    latest_blueprint = Blueprint('latest', __name__)
    current_version_blueprint = Blueprint(str(api_version), __name__)
    current_minor_version_blueprint = Blueprint(str(api_version_minor), __name__)


    specpath = '/cttv'

    if app.config['PROFILE'] == True:
        from werkzeug.contrib.profiler import ProfilerMiddleware
        app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])


    '''set the right prefixes'''

    create_api(latest_blueprint, api_version, specpath)
    create_api(current_version_blueprint, api_version, specpath)
    create_api(current_minor_version_blueprint, api_version_minor, specpath)

    # app.register_blueprint(latest_blueprint, url_prefix='/latest/platform')
    app.register_blueprint(current_version_blueprint, url_prefix='/v'+str(api_version) + '/platform')
    app.register_blueprint(current_minor_version_blueprint, url_prefix='/v'+str(api_version_minor) + '/platform')


    '''serve the static docs'''
    openapi_def = yaml.load(file('app/static/openapi.template.yaml', 'r'))
    app.logger.info('parsing swagger from app/static/openapi.template.yaml')

    #inject the description into the docs
    with open("api-description.md", "r") as f:
        desc = f.read()
    openapi_def['info']['description'] = desc
    openapi_def['basePath'] = '/v%s' % str(api_version)
    @app.route('/v%s/platform/swagger' % str(api_version))
    def serve_swagger(apiversion=api_version):
        return jsonify(openapi_def)

    @app.route('/v%s/platform/docs/swagger-ui' % str(api_version))
    def render_swaggerui(apiversion=api_version):
        return render_template('swaggerui.html',api_version=apiversion)

    '''pre and post-request'''


    @app.before_request
    def before_request():
        g.request_start = datetime.now()
    @app.after_request
    def after(resp):
        try:
            now = datetime.now()
            took = int(round((now - g.request_start).total_seconds()))
            resp.headers.add('Access-Control-Allow-Origin', '*')
            resp.headers.add('Access-Control-Allow-Headers','Content-Type,Auth-Token')
            resp.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
            if do_not_cache(request):# do not cache in the browser
                resp.headers.add('Cache-Control', "no-cache, must-revalidate, max-age=0")
            else:
                cache = 30 * 24 * 60 * 60  # cache for thirty days
                resp.headers.add('Cache-Control', "no-transform, max-age=%i"%(cache))
            return resp

        except Exception as e:
            app.logger.exception('failed request teardown function: %s', str(e))
            return resp

    # Override the HTTP exception handler.
    app.handle_http_exception = get_http_exception_handler(app)
    return app
Example #36
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    rconf = {'go_daemon': 'should'}

    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)
    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val
    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)
    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR',   default='')
    op.add_option('--gluster-log-file',    metavar='LOGF',  default=os.devnull, type=str, action='callback', callback=store_abs)
    op.add_option('--gluster-log-level',   metavar='LVL')
    op.add_option('--gluster-params',      metavar='PRMS',  default='')
    op.add_option('--gluster-cli-options', metavar='OPTS',  default='--log-file=-')
    op.add_option('--mountbroker',         metavar='LABEL')
    op.add_option('-p', '--pid-file',      metavar='PIDF',  type=str, action='callback', callback=store_abs)
    op.add_option('-l', '--log-file',      metavar='LOGF',  type=str, action='callback', callback=store_abs)
    op.add_option('--state-file',          metavar='STATF', type=str, action='callback', callback=store_abs)
    op.add_option('--ignore-deletes',      default=False, action='store_true')
    op.add_option('--use-rsync-xattrs',    default=False, action='store_true')
    op.add_option('-L', '--log-level',     metavar='LVL')
    op.add_option('-r', '--remote-gsyncd', metavar='CMD',   default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id',           metavar='UUID')
    op.add_option('--session-owner',       metavar='ID')
    op.add_option('-s', '--ssh-command',   metavar='CMD',   default='ssh')
    op.add_option('--rsync-command',       metavar='CMD',   default='rsync')
    op.add_option('--rsync-options',       metavar='OPTS',  default='--sparse')
    op.add_option('--rsync-ssh-options',   metavar='OPTS',  default='--compress')
    op.add_option('--timeout',             metavar='SEC',   type=int, default=120)
    op.add_option('--connection-timeout',  metavar='SEC',   type=int, default=60, help=SUPPRESS_HELP)
    op.add_option('--sync-jobs',           metavar='N',     type=int, default=3)
    op.add_option('--turns',               metavar='N',     type=int, default=0, help=SUPPRESS_HELP)
    op.add_option('--allow-network',       metavar='IPS',   default='')
    op.add_option('--state-socket-unencoded', metavar='SOCKF', type=str, action='callback', callback=store_abs)
    op.add_option('--checkpoint',          metavar='LABEL', default='')
    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    op.add_option('-c', '--config-file',   metavar='CONF',  type=str, action='callback', callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor', dest='monitor', action='callback', callback=store_local_curry(True))
    op.add_option('--feedback-fd', dest='feedback_fd', type=int, help=SUPPRESS_HELP, action='callback', callback=store_local)
    op.add_option('--listen', dest='listen', help=SUPPRESS_HELP,      action='callback', callback=store_local_curry(True))
    op.add_option('-N', '--no-daemon', dest="go_daemon",    action='callback', callback=store_local_curry('dont'))
    op.add_option('--debug', dest="go_daemon",              action='callback', callback=lambda *a: (store_local_curry('dont')(*a),
                                                                                                    setattr(a[-1].values, 'log_file', '-'),
                                                                                                    setattr(a[-1].values, 'log_level', 'DEBUG'))),

    for a in ('check', 'get'):
        op.add_option('--config-' + a,      metavar='OPT',  type=str, dest='config', action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all', dest='config', action='callback', callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m,   metavar='OPT VAL', type=str, nargs=2, dest='config', action='callback',
                          callback=store_local_obj('set', lambda vx: {'opt': vx[0], 'val': vx[1], 'rx': rx}))
            op.add_option('--config-del' + m,   metavar='OPT',  type=str, dest='config', action='callback',
                          callback=store_local_obj('del', lambda vx: {'opt': vx, 'rx': rx}))
        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url',           dest='url_print', action='callback', callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url',        dest='url_print', action='callback', callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url', dest='url_print', action='callback', callback=store_local_curry('canon_esc'))

    tunables = [ norm(o.get_opt_string()[2:]) for o in op.option_list if o.callback in (store_abs, 'store_true', None) and o.get_opt_string() not in ('--version', '--help') ]
    remote_tunables = [ 'listen', 'go_daemon', 'timeout', 'session_owner', 'config_file', 'use_rsync_xattrs' ]
    rq_remote_tunables = { 'listen': True }

    # precedence for sources of values: 1) commandline, 2) cfg file, 3) defaults
    # -- for this to work out we need to tell apart defaults from explicitly set
    # options... so churn out the defaults here and call the parser with virgin
    # values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    confdata = rconf.get('config')
    if not (len(args) == 2 or \
            (len(args) == 1 and rconf.get('listen')) or \
            (len(args) <= 2 and confdata) or \
            rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with restricted SSH invocation: ' + \
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if k not in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required for restricted SSH invocation' % \
                                  (k, v))

    confrx = getattr(confdata, 'rx', None)
    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = [ '\A' + fnmatch.translate(a) for a in args ]
        canon_peers = args
        namedict = {}
    else:
        rscs = [resource.parse_url(u) for u in args]
        dc = rconf.get('url_print')
        if dc:
            for r in rscs:
                print(r.get_url(**{'normal': {},
                                   'canon': {'canonical': True},
                                   'canon_esc': {'canonical': True, 'escaped': True}}[dc]))
            return
        local = remote = None
        if rscs:
            local = rscs[0]
            if len(rscs) > 1:
                remote = rscs[1]
            if not local.can_connect_to(remote):
                raise GsyncdError("%s cannot work with %s" % (local.path, remote and remote.path))
        pa = ([], [], [])
        urlprms = ({}, {'canonical': True}, {'canonical': True, 'escaped': True})
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        peers, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring to / representing
        # peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = { local: ('local', 'master'), remote: ('remote', 'slave') }
        else:
            rmap = { local: ('local', 'slave') }
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                if x.scheme == 'gluster':
                    namedict[name + 'vol'] = x.volume
    if 'config_file' not in rconf:
        rconf['config_file'] = os.path.join(os.path.dirname(sys.argv[0]), "conf/gsyncd.conf")
    gcnf = GConffile(rconf['config_file'], canon_peers, defaults.__dict__, opts.__dict__, namedict)

    checkpoint_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if it's just a config invocation
        if confdata.opt == 'checkpoint' and confdata.op in ('set', 'del') and \
           not confdata.rx:
            checkpoint_change = True
        if not checkpoint_change:
            return

    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            #legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [ IPNetwork(a) for a in gconf.allow_network.split(',') ]
            client_ip = IPAddress(ssh_conn.split()[0])
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    ffd = rconf.get('feedback_fd')
    if ffd:
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    #normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # I have _never_ _ever_ seen such an utterly braindead
        # error condition
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if checkpoint_change:
        GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
        if confdata.op == 'set':
            logging.info('checkpoint %s set' % confdata.val)
        elif confdata.op == 'del':
            logging.info('checkpoint info was reset')
        return

    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')

    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif remote:
        #master
        label = ''
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)

    if be_monitor:
        return monitor()

    logging.info("syncing: %s" % " -> ".join(peers))
    resource.Popen.init_errhandler()
    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
Example #37
    def createNet(self, netip, netmask=24):
        """
        Return and create if necessary network.  netip is in the form
        1.1.1.0/24 or with netmask passed as parameter.  Subnetworks created
        based on the zParameter zDefaulNetworkTree.
        Called by IpNetwork.createIp and IpRouteEntry.setTarget
        If the netmask is invalid, then a netmask of 24 is assumed.

        @param netip: network IP address start
        @type netip: string
        @param netmask: network mask
        @type netmask: integer
        @todo: investigate IPv6 issues
        """
        if '/' in netip:
            netip, netmask = netip.split("/", 1)

        checkip(netip)
        ipobj = IPAddress(ipunwrap_strip(netip))
        try:
            netmask = int(netmask)
        except (TypeError, ValueError):
            netmask = 24
        netmask = netmask if netmask < ipobj.max_prefixlen else 24

        #hook method do not remove!
        netroot = self.getNetworkRoot(ipobj.version)
        netobj = netroot.getNet(netip)
        if netmask == 0:
            raise ValueError("netip '%s' without netmask" % netip)
        if netobj and netobj.netmask >= netmask:  # Network already exists.
            return netobj

        ipNetObj = IPNetwork(netip)
        if ipNetObj.version == 4:
            netip = getnetstr(netip, netmask)
            netTree = getattr(self, 'zDefaultNetworkTree', defaultNetworkTree)
            netTree = map(int, netTree)
            if ipobj.max_prefixlen not in netTree:
                netTree.append(ipobj.max_prefixlen)
        else:
            # IPv6 doesn't use subnet masks the same way
            netip = getnetstr(netip, 64)
            netmask = 64
            # ISPs are supposed to provide the 48-bit prefix to orgs (RFC 3177)
            netTree = (48, )

        if netobj:
            # strip irrelevant values from netTree if we're not starting at /0
            netTree = [m for m in netTree if m > netobj.netmask]
        else:
            # start at /Networks if no containing network was found
            netobj = netroot

        for treemask in netTree:
            if treemask >= netmask:
                netobjParent = netobj
                netobj = netobj.addSubNetwork(netip, netmask)
                self.rebalance(netobjParent, netobj)
                break
            else:
                supnetip = getnetstr(netip, treemask)
                netobjParent = netobj
                netobj = netobj.addSubNetwork(supnetip, treemask)
                self.rebalance(netobjParent, netobj)

        return netobj
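
The IPv4 branch above walks the network tree from the widest mask down,
creating one containing supernet per tier; a compact sketch of how those
intermediate supernets are derived (the (16, 24) tree and the addresses are
illustrative, not the zDefaultNetworkTree default):

from ipaddr import IPNetwork

target_ip, target_mask = '10.1.2.64', 26
for treemask in (16, 24, target_mask):
    # masked() zeroes the host bits, yielding the supernet for this tier
    print IPNetwork('%s/%d' % (target_ip, treemask)).masked()
# 10.1.0.0/16
# 10.1.2.0/24
# 10.1.2.64/26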
Example #38
    def test_remote_connections_unaffected(self):
        """
        A connection attempt to an IP not assigned to this host on the proxied
        port is not proxied.
        """
        network = IPNetwork("172.16.0.0/12")
        gateway = network[1]
        address = network[2]

        # The strategy taken by this test is to create a new, clean network
        # stack and then treat it like a foreign host.  A connection to that
        # foreign host should not be proxied.  This is possible because Linux
        # supports the creation of an arbitrary number of instances of its
        # network stack, all isolated from each other.
        #
        # To learn more, here are some links:
        #
        # http://man7.org/linux/man-pages/man8/ip-netns.8.html
        # http://blog.scottlowe.org/2013/09/04/introducing-linux-network-namespaces/
        #
        # Note also that Linux network namespaces are how Docker creates
        # isolated network environments.

        # Create a remote "host" that the test can reliably fail a connection
        # attempt to.
        pid = getpid()
        veth0 = b"veth_" + hex(pid)
        veth1 = b"veth1"
        network_namespace = b"%s.%s" % (self.id(), getpid())

        def run(cmd):
            check_call(cmd.split())

        # Destroy whatever system resources we go on to allocate in this test.
        # We set this up first so even if one of the operations encounters an
        # error after a resource has been allocated we'll still clean it up.
        # It's not an error to try to delete things that don't exist
        # (conveniently).
        self.addCleanup(run, b"ip netns delete " + network_namespace)
        self.addCleanup(run, b"ip link delete " + veth0)

        ops = [
            # Create a new network namespace where we can assign a non-local
            # address to use as the target of a connection attempt.
            b"ip netns add %(netns)s",

            # Create a virtual ethernet pair so there is a network link between
            # the host and the new network namespace.
            b"ip link add %(veth0)s type veth peer name %(veth1)s",

            # Assign an address to the virtual ethernet interface that will
            # remain on the host.  This will be our "gateway" into the network
            # namespace.
            b"ip address add %(gateway)s dev %(veth0)s",

            # Bring it up.
            b"ip link set dev %(veth0)s up",

            # Put the other virtual ethernet interface into the network
            # namespace.  Now it will only affect networking behavior for code
            # running in that network namespace, not for code running directly
            # on the host network (like the code in this test and whatever
            # iptables rules we created).
            b"ip link set %(veth1)s netns %(netns)s",

            # Assign to that virtual ethernet interface an address on the same
            # (private, unused) network as the address we gave to the gateway
            # interface.
            b"ip netns exec %(netns)s ip address add %(address)s "
            b"dev %(veth1)s",

            # And bring it up.
            b"ip netns exec %(netns)s ip link set dev %(veth1)s up",

            # Add a route into the network namespace via the virtual interface
            # for traffic bound for addresses on that network.
            b"ip route add %(network)s dev %(veth0)s scope link",

            # And add a reciprocal route so traffic generated inside the
            # network namespace (like TCP RST packets) can get back to us.
            b"ip netns exec %(netns)s ip route add default dev %(veth1)s",
        ]

        params = dict(
            netns=network_namespace,
            veth0=veth0,
            veth1=veth1,
            address=address,
            gateway=gateway,
            network=network,
        )
        for op in ops:
            run(op % params)

        # Create the proxy which we expect not to be invoked.
        self.network.create_proxy_to(self.server_ip, self.port)

        client = socket()
        client.settimeout(1)

        # Try to connect to an address hosted inside that network namespace.
        # It should fail.  It should not be proxied to the server created in
        # setUp.
        exception = self.assertRaises(error, client.connect,
                                      (str(address), self.port))
        self.assertEqual(ECONNREFUSED, exception.errno)
Example #39
    def ip(self):
        """Return IPNetwork representation of self.ip_network field.

        :return: IPNetwork()
        """
        return IPNetwork(self.ip_network)
Example #40
def test_ip_blacklist(self):
    http.DISALLOWED_IPS = set([IPNetwork('127.0.0.1')])
    with pytest.raises(SuspiciousOperation):
        http.safe_urlopen('http://127.0.0.1')
Example #41
def internal_virtual_ip(self):
    return str(
        IPNetwork(
            self.environment().network_by_name('internal').ip_network)[-2])
Example #42
                       help='BGP /32 announces block (default: none)')
argparser.add_argument('--sessions',
                       metavar='COUNT',
                       dest='sessions',
                       default=1,
                       help='number of BGP sessions to establish (default: 1)')
args = argparser.parse_args()

# validate command-line options
try:
    peer_ip = IPAddress(args.peer_ip)
except:
    print 'invalid peer address "%s" - aborting' % args.peer_ip
    sys.exit(1)
try:
    local_cidr = IPNetwork(args.local_cidr)
except:
    print 'invalid local addresses block "%s" - aborting' % args.local_cidr
    sys.exit(1)
if args.announce_cidr:
    try:
        announce_cidr = IPNetwork(args.announce_cidr)
        announce_cidr = announce_cidr.iterhosts()
    except:
        print 'invalid announce addresses block %s - aborting' % args.announce_cidr
        sys.exit(1)

# create ExaBGP configuration + setup address aliases if required
configuration = 'group peers {\n'
sessions = 0
for address in local_cidr.iterhosts():
Example #43
def _router(self, router_name):
    return str(
        IPNetwork(
            self.environment().network_by_name(router_name).ip_network)[1])
Example #44
def main_i():
    """internal main routine

    parse command line, decide what action will be taken;
    we can either:
    - query/manipulate configuration
    - format gsyncd urls using gsyncd's url parsing engine
    - start service in following modes, in given stages:
      - agent: startup(), ChangelogAgent()
      - monitor: startup(), monitor()
      - master: startup(), connect_remote(), connect(), service_loop()
      - slave: startup(), connect(), service_loop()
    """
    rconf = {'go_daemon': 'should'}

    def store_abs(opt, optstr, val, parser):
        if val and val != '-':
            val = os.path.abspath(val)
        setattr(parser.values, opt.dest, val)

    def store_local(opt, optstr, val, parser):
        rconf[opt.dest] = val

    def store_local_curry(val):
        return lambda o, oo, vx, p: store_local(o, oo, val, p)

    def store_local_obj(op, dmake):
        return lambda o, oo, vx, p: store_local(
            o, oo, FreeObject(op=op, **dmake(vx)), p)

    op = OptionParser(usage="%prog [options...] <master> <slave>",
                      version="%prog 0.0.1")
    op.add_option('--gluster-command-dir', metavar='DIR', default='')
    op.add_option('--gluster-log-file',
                  metavar='LOGF',
                  default=os.devnull,
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--gluster-log-level', metavar='LVL')
    op.add_option('--changelog-log-level', metavar='LVL', default="INFO")
    op.add_option('--gluster-params', metavar='PRMS', default='')
    op.add_option('--glusterd-uuid',
                  metavar='UUID',
                  type=str,
                  default='',
                  help=SUPPRESS_HELP)
    op.add_option('--gluster-cli-options',
                  metavar='OPTS',
                  default='--log-file=-')
    op.add_option('--mountbroker', metavar='LABEL')
    op.add_option('-p',
                  '--pid-file',
                  metavar='PIDF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('-l',
                  '--log-file',
                  metavar='LOGF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--iprefix',
                  metavar='LOGD',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--changelog-log-file',
                  metavar='LOGF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--log-file-mbr',
                  metavar='LOGF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--state-file',
                  metavar='STATF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--state-detail-file',
                  metavar='STATF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--georep-session-working-dir',
                  metavar='STATF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--access-mount', default=False, action='store_true')
    op.add_option('--ignore-deletes', default=False, action='store_true')
    op.add_option('--isolated-slave', default=False, action='store_true')
    op.add_option('--use-rsync-xattrs', default=False, action='store_true')
    op.add_option('--sync-xattrs', default=True, action='store_true')
    op.add_option('--sync-acls', default=True, action='store_true')
    op.add_option('--log-rsync-performance',
                  default=False,
                  action='store_true')
    op.add_option('--max-rsync-retries', type=int, default=10)
    # Max size of Changelogs to process per batch, Changelogs Processing is
    # not limited by the number of changelogs but instead based on
    # size of the changelog file, One sample changelog file size was 145408
    # with ~1000 CREATE and ~1000 DATA. 5 such files in one batch is 727040
    # If geo-rep worker crashes while processing a batch, it has to retry only
    # that batch since stime will get updated after each batch.
    op.add_option('--changelog-batch-size', type=int, default=727040)
    op.add_option('--pause-on-start', default=False, action='store_true')
    op.add_option('-L', '--log-level', metavar='LVL')
    op.add_option('-r',
                  '--remote-gsyncd',
                  metavar='CMD',
                  default=os.path.abspath(sys.argv[0]))
    op.add_option('--volume-id', metavar='UUID')
    op.add_option('--slave-id', metavar='ID')
    op.add_option('--session-owner', metavar='ID')
    op.add_option('--local-id', metavar='ID', help=SUPPRESS_HELP, default='')
    op.add_option('--local-node',
                  metavar='NODE',
                  help=SUPPRESS_HELP,
                  default='')
    op.add_option('--local-node-id',
                  metavar='NODEID',
                  help=SUPPRESS_HELP,
                  default='')
    op.add_option('--local-path',
                  metavar='PATH',
                  help=SUPPRESS_HELP,
                  default='')
    op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
    op.add_option('--ssh-port', metavar='PORT', type=int, default=22)
    op.add_option('--ssh-command-tar', metavar='CMD', default='ssh')
    op.add_option('--rsync-command', metavar='CMD', default='rsync')
    op.add_option('--rsync-options', metavar='OPTS', default='')
    op.add_option('--rsync-ssh-options', metavar='OPTS', default='--compress')
    op.add_option('--rsync-opt-ignore-missing-args', default="true")
    op.add_option('--rsync-opt-existing', default="true")
    op.add_option('--timeout', metavar='SEC', type=int, default=120)
    op.add_option('--connection-timeout',
                  metavar='SEC',
                  type=int,
                  default=60,
                  help=SUPPRESS_HELP)
    op.add_option('--sync-jobs', metavar='N', type=int, default=3)
    op.add_option('--replica-failover-interval',
                  metavar='N',
                  type=int,
                  default=1)
    op.add_option('--changelog-archive-format',
                  metavar='N',
                  type=str,
                  default="%Y%m")
    op.add_option('--use-meta-volume', default=False, action='store_true')
    op.add_option('--meta-volume-mnt',
                  metavar='N',
                  type=str,
                  default="/var/run/gluster/shared_storage")
    op.add_option('--turns',
                  metavar='N',
                  type=int,
                  default=0,
                  help=SUPPRESS_HELP)
    op.add_option('--allow-network', metavar='IPS', default='')
    op.add_option('--socketdir', metavar='DIR')
    op.add_option('--state-socket-unencoded',
                  metavar='SOCKF',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--checkpoint', metavar='LABEL', default='0')

    # tunables for failover/failback mechanism:
    # None   - gsyncd behaves as normal
    # blind  - gsyncd works with xtime pairs to identify
    #          candidates for synchronization
    # wrapup - same as normal mode but does not assign
    #          xtimes to orphaned files
    # see crawl() for usage of the above tunables
    op.add_option('--special-sync-mode', type=str, help=SUPPRESS_HELP)

    # changelog or xtime? (TODO: Change the default)
    op.add_option('--change-detector',
                  metavar='MODE',
                  type=str,
                  default='xtime')
    # sleep interval for change detection (xtime crawl uses a hardcoded 1
    # second sleep time)
    op.add_option('--change-interval', metavar='SEC', type=int, default=3)
    # working directory for changelog based mechanism
    op.add_option('--working-dir',
                  metavar='DIR',
                  type=str,
                  action='callback',
                  callback=store_abs)
    op.add_option('--use-tarssh', default=False, action='store_true')

    op.add_option('-c',
                  '--config-file',
                  metavar='CONF',
                  type=str,
                  action='callback',
                  callback=store_local)
    # duh. need to specify dest or value will be mapped to None :S
    op.add_option('--monitor',
                  dest='monitor',
                  action='callback',
                  callback=store_local_curry(True))
    op.add_option('--agent',
                  dest='agent',
                  action='callback',
                  callback=store_local_curry(True))
    op.add_option('--resource-local',
                  dest='resource_local',
                  type=str,
                  action='callback',
                  callback=store_local)
    op.add_option('--resource-remote',
                  dest='resource_remote',
                  type=str,
                  action='callback',
                  callback=store_local)
    op.add_option('--feedback-fd',
                  dest='feedback_fd',
                  type=int,
                  help=SUPPRESS_HELP,
                  action='callback',
                  callback=store_local)
    op.add_option('--rpc-fd', dest='rpc_fd', type=str, help=SUPPRESS_HELP)
    op.add_option('--subvol-num',
                  dest='subvol_num',
                  type=str,
                  help=SUPPRESS_HELP)
    op.add_option('--listen',
                  dest='listen',
                  help=SUPPRESS_HELP,
                  action='callback',
                  callback=store_local_curry(True))
    op.add_option('-N',
                  '--no-daemon',
                  dest="go_daemon",
                  action='callback',
                  callback=store_local_curry('dont'))
    op.add_option('--verify',
                  type=str,
                  dest="verify",
                  action='callback',
                  callback=store_local)
    op.add_option('--slavevoluuid-get',
                  type=str,
                  dest="slavevoluuid_get",
                  action='callback',
                  callback=store_local)
    op.add_option('--create',
                  type=str,
                  dest="create",
                  action='callback',
                  callback=store_local)
    op.add_option('--delete',
                  dest='delete',
                  action='callback',
                  callback=store_local_curry(True))
    op.add_option('--path-list',
                  dest='path_list',
                  action='callback',
                  type=str,
                  callback=store_local)
    op.add_option('--reset-sync-time', default=False, action='store_true')
    op.add_option('--status-get',
                  dest='status_get',
                  action='callback',
                  callback=store_local_curry(True))
    op.add_option('--debug',
                  dest="go_daemon",
                  action='callback',
                  callback=lambda *a:
                  (store_local_curry('dont')
                   (*a), setattr(a[-1].values, 'log_file', '-'),
                   setattr(a[-1].values, 'log_level', 'DEBUG'),
                   setattr(a[-1].values, 'changelog_log_file', '-')))
    op.add_option('--path', type=str, action='append')

    for a in ('check', 'get'):
        op.add_option('--config-' + a,
                      metavar='OPT',
                      type=str,
                      dest='config',
                      action='callback',
                      callback=store_local_obj(a, lambda vx: {'opt': vx}))
    op.add_option('--config-get-all',
                  dest='config',
                  action='callback',
                  callback=store_local_obj('get', lambda vx: {'opt': None}))
    for m in ('', '-rx', '-glob'):
        # call this code 'Pythonic' eh?
        # have to define a one-shot local function to be able
        # to inject (a value depending on the)
        # iteration variable into the inner lambda
        def conf_mod_opt_regex_variant(rx):
            op.add_option('--config-set' + m,
                          metavar='OPT VAL',
                          type=str,
                          nargs=2,
                          dest='config',
                          action='callback',
                          callback=store_local_obj(
                              'set', lambda vx: {
                                  'opt': vx[0],
                                  'val': vx[1],
                                  'rx': rx
                              }))
            op.add_option('--config-del' + m,
                          metavar='OPT',
                          type=str,
                          dest='config',
                          action='callback',
                          callback=store_local_obj(
                              'del', lambda vx: {
                                  'opt': vx,
                                  'rx': rx
                              }))

        conf_mod_opt_regex_variant(m and m[1:] or False)

    op.add_option('--normalize-url',
                  dest='url_print',
                  action='callback',
                  callback=store_local_curry('normal'))
    op.add_option('--canonicalize-url',
                  dest='url_print',
                  action='callback',
                  callback=store_local_curry('canon'))
    op.add_option('--canonicalize-escape-url',
                  dest='url_print',
                  action='callback',
                  callback=store_local_curry('canon_esc'))
    op.add_option('--is-hottier', default=False, action='store_true')

    tunables = [
        norm(o.get_opt_string()[2:]) for o in op.option_list
        if (o.callback in (store_abs, 'store_true',
                           None) and o.get_opt_string() not in ('--version',
                                                                '--help'))
    ]
    remote_tunables = [
        'listen', 'go_daemon', 'timeout', 'session_owner', 'config_file',
        'use_rsync_xattrs', 'local_id', 'local_node', 'access_mount'
    ]
    rq_remote_tunables = {'listen': True}

    # precedence for sources of values: 1) commandline, 2) cfg file, 3)
    # defaults for this to work out we need to tell apart defaults from
    # explicitly set options... so churn out the defaults here and call
    # the parser with virgin values container.
    defaults = op.get_default_values()
    opts, args = op.parse_args(values=optparse.Values())
    # slave url cleanup, if input comes with vol uuid as follows
    # 'ssh://fvm1::gv2:07dfddca-94bb-4841-a051-a7e582811467'
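    # the uuid suffix after the volume name is stripped, leaving 'ssh://fvm1::gv2'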
    temp_args = []
    for arg in args:
        # Split based on ::
        data = arg.split("::")
        if len(data) > 1:
            slavevol_name = data[1].split(":")[0]
            temp_args.append("%s::%s" % (data[0], slavevol_name))
        else:
            temp_args.append(data[0])
    args = temp_args
    args_orig = args[:]

    voluuid_get = rconf.get('slavevoluuid_get')
    if voluuid_get:
        slave_host, slave_vol = voluuid_get.split("::")
        svol_uuid = slave_vol_uuid_get(slave_host, slave_vol)
        print svol_uuid
        return

    r = rconf.get('resource_local')
    if r:
        if len(args) == 0:
            args.append(None)
        args[0] = r
    r = rconf.get('resource_remote')
    if r:
        if len(args) == 0:
            raise GsyncdError('local resource unspecified')
        elif len(args) == 1:
            args.append(None)
        args[1] = r
    confdata = rconf.get('config')
    if not (len(args) == 2 or (len(args) == 1 and rconf.get('listen')) or
            (len(args) <= 2 and confdata) or rconf.get('url_print')):
        sys.stderr.write("error: incorrect number of arguments\n\n")
        sys.stderr.write(op.get_usage() + "\n")
        sys.exit(1)

    verify = rconf.get('verify')
    if verify:
        logging.info(verify)
        logging.info("Able to spawn gsyncd.py")
        return

    restricted = os.getenv('_GSYNCD_RESTRICTED_')

    if restricted:
        allopts = {}
        allopts.update(opts.__dict__)
        allopts.update(rconf)
        bannedtuns = set(allopts.keys()) - set(remote_tunables)
        if bannedtuns:
            raise GsyncdError('following tunables cannot be set with '
                              'restricted SSH invocation: ' +
                              ', '.join(bannedtuns))
        for k, v in rq_remote_tunables.items():
            if k not in allopts or allopts[k] != v:
                raise GsyncdError('tunable %s is not set to value %s required '
                                  'for restricted SSH invocation' % (k, v))

    confrx = getattr(confdata, 'rx', None)

    def makersc(aa, check=True):
        if not aa:
            return ([], None, None)
        ra = [resource.parse_url(u) for u in aa]
        local = ra[0]
        remote = None
        if len(ra) > 1:
            remote = ra[1]
        if check and not local.can_connect_to(remote):
            raise GsyncdError("%s cannot work with %s" %
                              (local.path, remote and remote.path))
        return (ra, local, remote)

    if confrx:
        # peers are regexen, don't try to parse them
        if confrx == 'glob':
            args = ['\A' + fnmatch.translate(a) for a in args]
        canon_peers = args
        namedict = {}
    else:
        dc = rconf.get('url_print')
        rscs, local, remote = makersc(args_orig, not dc)
        if dc:
            for r in rscs:
                print(
                    r.get_url(
                        **{
                            'normal': {},
                            'canon': {
                                'canonical': True
                            },
                            'canon_esc': {
                                'canonical': True,
                                'escaped': True
                            }
                        }[dc]))
            return
        pa = ([], [], [])
        urlprms = ({}, {
            'canonical': True
        }, {
            'canonical': True,
            'escaped': True
        })
        for x in rscs:
            for i in range(len(pa)):
                pa[i].append(x.get_url(**urlprms[i]))
        _, canon_peers, canon_esc_peers = pa
        # creating the namedict, a dict representing various ways of referring
        # to / representing peers to be fillable in config templates
        mods = (lambda x: x, lambda x: x[0].upper() + x[1:],
                lambda x: 'e' + x[0].upper() + x[1:])
        if remote:
            rmap = {local: ('local', 'master'), remote: ('remote', 'slave')}
        else:
            rmap = {local: ('local', 'slave')}
        namedict = {}
        for i in range(len(rscs)):
            x = rscs[i]
            for name in rmap[x]:
                for j in range(3):
                    namedict[mods[j](name)] = pa[j][i]
                namedict[name + 'vol'] = x.volume
                if name == 'remote':
                    namedict['remotehost'] = x.remotehost

    if 'config_file' not in rconf:
        rconf['config_file'] = TMPL_CONFIG_FILE

    # Upgrade Config File only if it is session conf file
    if rconf['config_file'] != TMPL_CONFIG_FILE:
        upgrade_config_file(rconf['config_file'], confdata)

    gcnf = GConffile(rconf['config_file'], canon_peers, confdata,
                     defaults.__dict__, opts.__dict__, namedict)

    conf_change = False
    if confdata:
        opt_ok = norm(confdata.opt) in tunables + [None]
        if confdata.op == 'check':
            if opt_ok:
                sys.exit(0)
            else:
                sys.exit(1)
        elif not opt_ok:
            raise GsyncdError("not a valid option: " + confdata.opt)
        if confdata.op == 'get':
            gcnf.get(confdata.opt)
        elif confdata.op == 'set':
            gcnf.set(confdata.opt, confdata.val, confdata.rx)
        elif confdata.op == 'del':
            gcnf.delete(confdata.opt, confdata.rx)
        # when modifying checkpoint, it's important to make a log
        # of that, so in that case we go on to set up logging even
        # if it's just a config invocation
        if confdata.op in ('set', 'del') and not confdata.rx:
            conf_change = True

        if not conf_change:
            return

    gconf.__dict__.update(defaults.__dict__)
    gcnf.update_to(gconf.__dict__)
    gconf.__dict__.update(opts.__dict__)
    gconf.configinterface = gcnf

    delete = rconf.get('delete')
    if delete:
        logging.info('geo-replication delete')
        # remove the stime xattr from all the brick paths so that
        # a re-create of a session will start sync all over again
        stime_xattr_name = getattr(gconf, 'master.stime_xattr_name', None)

        # Delete pid file, status file, socket file
        cleanup_paths = []
        if getattr(gconf, 'pid_file', None):
            cleanup_paths.append(gconf.pid_file)

        if getattr(gconf, 'state_file', None):
            cleanup_paths.append(gconf.state_file)

        if getattr(gconf, 'state_detail_file', None):
            cleanup_paths.append(gconf.state_detail_file)

        if getattr(gconf, 'state_socket_unencoded', None):
            cleanup_paths.append(gconf.state_socket_unencoded)

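        # config_file ends with the 11-character 'gsyncd.conf' basename;
        # strip it and glob to catch everything under the session prefix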
        cleanup_paths.append(rconf['config_file'][:-11] + "*")

        # Cleanup changelog working dirs
        if getattr(gconf, 'working_dir', None):
            try:
                shutil.rmtree(gconf.working_dir)
            except (IOError, OSError) as e:
                if e.errno == ENOENT:
                    pass
                else:
                    raise GsyncdError('Error while removing working dir: %s' %
                                      gconf.working_dir)

        for path in cleanup_paths:
            # remove the file itself along with any temp files sharing
            # its prefix
            for f in glob.glob(path + "*"):
                _unlink(f)

        reset_sync_time = boolify(gconf.reset_sync_time)
        if reset_sync_time and stime_xattr_name:
            path_list = rconf.get('path_list')
            paths = []
            for p in path_list.split('--path='):
                stripped_path = p.strip()
                if stripped_path != "":
                    # set stime to (0, 0) to trigger a full volume content
                    # resync to the slave on session recreation
                    # (see master.py::Xcrawl, hint: zero_zero)
                    Xattr.lsetxattr(stripped_path, stime_xattr_name,
                                    struct.pack("!II", 0, 0))

        return

    if restricted and gconf.allow_network:
        ssh_conn = os.getenv('SSH_CONNECTION')
        if not ssh_conn:
            # legacy env var
            ssh_conn = os.getenv('SSH_CLIENT')
        if ssh_conn:
            allowed_networks = [
                IPNetwork(a) for a in gconf.allow_network.split(',')
            ]
            client_ip = IPAddress(ssh_conn.split()[0])
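            # containment works directly on these objects, e.g.
            # IPAddress('10.0.0.5') in IPNetwork('10.0.0.0/24') -> True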
            allowed = False
            for nw in allowed_networks:
                if client_ip in nw:
                    allowed = True
                    break
            if not allowed:
                raise GsyncdError("client IP address is not allowed")

    ffd = rconf.get('feedback_fd')
    if ffd:
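        # mark the feedback fd close-on-exec so spawned children
        # don't inherit it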
        fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

    # normalize loglevel
    lvl0 = gconf.log_level
    if isinstance(lvl0, str):
        lvl1 = lvl0.upper()
        lvl2 = logging.getLevelName(lvl1)
        # logging.getLevelName() does not raise on an unknown level
        # name; it returns the string "Level <name>" instead (e.g.
        # logging.getLevelName('BOGUS') == 'Level BOGUS'), so detect
        # that sentinel by hand
        if lvl2 == "Level " + lvl1:
            raise GsyncdError('cannot recognize log level "%s"' % lvl0)
        gconf.log_level = lvl2

    if not privileged() and gconf.log_file_mbr:
        gconf.log_file = gconf.log_file_mbr

    if conf_change:
        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='conf')
            gconf.log_exit = False

            if confdata.op == 'set':
                if confdata.opt == 'checkpoint':
                    logging.info(
                        lf("Checkpoint Set",
                           time=human_time_utc(confdata.val)))
                else:
                    logging.info(
                        lf("Config Set",
                           config=confdata.opt,
                           value=confdata.val))
            elif confdata.op == 'del':
                if confdata.opt == 'checkpoint':
                    logging.info("Checkpoint Reset")
                else:
                    logging.info(lf("Config Reset", config=confdata.opt))
        except IOError as e:
            if e.errno == ENOENT:
                # directory of log path is not present,
                # which happens if we get here from
                # a peer-multiplexed "config-set checkpoint"
                # (as that directory is created only on the
                # original node)
                pass
            else:
                raise
        return

    create = rconf.get('create')
    if create:
        if getattr(gconf, 'state_file', None):
            set_monitor_status(gconf.state_file, create)

        try:
            GLogger._gsyncd_loginit(log_file=gconf.log_file, label='monitor')
            gconf.log_exit = False
            logging.info(lf("Monitor Status Change", status=create))
        except IOError as e:
            if e.errno == ENOENT:
                # log directory is not present
                pass
            else:
                raise
        return

    go_daemon = rconf['go_daemon']
    be_monitor = rconf.get('monitor')
    be_agent = rconf.get('agent')

    rscs, local, remote = makersc(args)

    status_get = rconf.get('status_get')
    if status_get:
        master_name, slave_data = get_master_and_slave_data_from_args(args)
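        # emit one status row per local brick of the master volume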
        for brick in gconf.path:
            brick_status = GeorepStatus(gconf.state_file, gconf.local_node,
                                        brick, gconf.local_node_id,
                                        master_name, slave_data,
                                        getattr(gconf, "pid_file", None))
            checkpoint_time = int(getattr(gconf, "checkpoint", "0"))
            brick_status.print_status(checkpoint_time=checkpoint_time)
        return

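    # when running as a master towards an SSH slave, daemonization is
    # deferred until the remote connection is established ('postconn')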
    if not be_monitor and isinstance(remote, resource.SSH) and \
       go_daemon == 'should':
        go_daemon = 'postconn'
        log_file = None
    else:
        log_file = gconf.log_file
    if be_monitor:
        label = 'monitor'
    elif be_agent:
        label = gconf.local_path
    elif remote:
        # master
        label = gconf.local_path
    else:
        label = 'slave'
    startup(go_daemon=go_daemon, log_file=log_file, label=label)
    Popen.init_errhandler()

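    # dispatch to the requested role: changelog agent, monitor,
    # master (a remote is present) or slave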
    if be_agent:
        os.setsid()
        logging.debug(lf("RPC FD", rpc_fd=repr(gconf.rpc_fd)))
        return agent(Changelog(), gconf.rpc_fd)

    if be_monitor:
        return monitor(*rscs)

    if remote:
        go_daemon = remote.connect_remote(go_daemon=go_daemon)
        if go_daemon:
            startup(go_daemon=go_daemon, log_file=gconf.log_file)
            # complete remote connection in child
            remote.connect_remote(go_daemon='done')
    local.connect()
    if ffd:
        logging.info("Closing feedback fd, waking up the monitor")
        os.close(ffd)
    local.service_loop(*[r for r in [remote] if r])
Exemple #45
0
    def default_gw(self):
        return IPNetwork(self.ip_network)[1]
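# A minimal sketch of the snippet above, assuming self.ip_network holds a
# CIDR string (the sample value here is hypothetical):
#
#   IPNetwork('192.168.1.0/24')[1]  # -> IPAddress('192.168.1.1')
#
# i.e. the first usable host address of the network, which is what this
# property hands out as the default gateway.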