Esempio n. 1
0
def create_certificate(keyout=None, certout=None):
    """Generate a self-signed EC (secp384r1) TLS key and certificate.

    :param keyout: path for the private key; when falsy, both paths are
        resolved via get_certificate_paths()
    :param certout: path for the certificate
    :raises Exception: if no certificate path can be determined
    """
    if not keyout:
        keyout, certout = get_certificate_paths()
    if not keyout:
        raise Exception('Unable to locate TLS certificate path automatically')
    shortname = socket.gethostname().split('.')[0]
    longname = socket.getfqdn()
    subprocess.check_call([
        'openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', keyout
    ])
    san = ['IP:{0}'.format(x) for x in get_ip_addresses()]
    # It is incorrect to put IP addresses as DNS type.  However
    # there exists non-compliant clients that fail with them as IP
    san.extend(['DNS:{0}'.format(x) for x in get_ip_addresses()])
    san.append('DNS:{0}'.format(shortname))
    san.append('DNS:{0}'.format(longname))
    san = ','.join(san)
    sslcfg = get_openssl_conf_location()
    # mkstemp instead of the deprecated, race-prone mktemp; close the fd,
    # we only need the (now safely created) path
    tmpfd, tmpconfig = tempfile.mkstemp()
    os.close(tmpfd)
    shutil.copy2(sslcfg, tmpconfig)
    try:
        with open(tmpconfig, 'a') as cfgfile:
            # Bug fix: the section header was written as '[SAN]i', which did
            # not match the '-extensions SAN' argument passed to openssl below
            cfgfile.write(
                '\n[SAN]\nbasicConstraints = CA:true\nsubjectAltName={0}'.
                format(san))
        subprocess.check_call([
            'openssl', 'req', '-new', '-x509', '-key', keyout, '-days', '7300',
            '-out', certout, '-subj', '/CN={0}'.format(longname),
            '-extensions', 'SAN', '-config', tmpconfig
        ])
    finally:
        os.remove(tmpconfig)
    # Could restart the webserver now?
    fname = '/var/lib/confluent/public/site/tls/{0}.pem'.format(
        collective.get_myname())
    try:
        os.makedirs(os.path.dirname(fname))
    except OSError as e:
        if e.errno != 17:  # EEXIST: directory already present is fine
            raise
    shutil.copy2(certout, fname)
    # publish the cert under its openssl subject hash (c_rehash style name)
    hv = subprocess.check_output(
        ['openssl', 'x509', '-in', certout, '-hash', '-noout'])
    if not isinstance(hv, str):
        hv = hv.decode('utf8')
    hv = hv.strip()
    hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
    certname = '{0}.pem'.format(collective.get_myname())
    # drop any stale hash links that point at our cert before relinking
    for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
        currname = os.path.join('/var/lib/confluent/public/site/tls/',
                                currname)
        if currname.endswith('.0'):
            try:
                realname = os.readlink(currname)
                if realname == certname:
                    os.unlink(currname)
            except OSError:
                pass
    os.symlink(certname, hashname)
Esempio n. 2
0
def assure_tls_ca():
    """Create the confluent TLS CA key/cert if not already present.

    Generates an EC (secp384r1) CA key pair under /etc/confluent/tls and
    publishes the CA certificate (plus its subject-hash symlink) under
    /var/lib/confluent/public/site/tls.  No-op when the CA cert exists.
    """
    keyout, certout = ('/etc/confluent/tls/cakey.pem',
                       '/etc/confluent/tls/cacert.pem')
    if os.path.exists(certout):
        return
    try:
        os.makedirs('/etc/confluent/tls')
    except OSError as e:
        if e.errno != 17:  # EEXIST: directory already present is fine
            raise
    subprocess.check_call([
        'openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', keyout
    ])
    sslcfg = get_openssl_conf_location()
    # mkstemp instead of the deprecated, race-prone mktemp; creating the temp
    # file after key generation (and copying inside the try) also fixes a
    # temp-file leak when the openssl call above raised
    tmpfd, tmpconfig = tempfile.mkstemp()
    os.close(tmpfd)
    try:
        shutil.copy2(sslcfg, tmpconfig)
        with open(tmpconfig, 'a') as cfgfile:
            cfgfile.write('\n[CACert]\nbasicConstraints = CA:true\n')
        subprocess.check_call([
            'openssl', 'req', '-new', '-x509', '-key', keyout, '-days',
            '27300', '-out', certout, '-subj',
            '/CN=Confluent TLS Certificate authority ({0})'.format(
                socket.gethostname()), '-extensions', 'CACert', '-config',
            tmpconfig
        ])
    finally:
        os.remove(tmpconfig)
    # Could restart the webserver now?
    fname = '/var/lib/confluent/public/site/tls/{0}.pem'.format(
        collective.get_myname())
    try:
        os.makedirs(os.path.dirname(fname))
    except OSError as e:
        if e.errno != 17:  # EEXIST
            raise
    shutil.copy2('/etc/confluent/tls/cacert.pem', fname)
    # publish the CA cert under its openssl subject hash (c_rehash style name)
    hv = subprocess.check_output([
        'openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash',
        '-noout'
    ])
    if not isinstance(hv, str):
        hv = hv.decode('utf8')
    hv = hv.strip()
    hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv)
    certname = '{0}.pem'.format(collective.get_myname())
    # drop any stale hash links that point at our cert before relinking
    for currname in os.listdir('/var/lib/confluent/public/site/tls/'):
        currname = os.path.join('/var/lib/confluent/public/site/tls/',
                                currname)
        if currname.endswith('.0'):
            try:
                realname = os.readlink(currname)
                if realname == certname:
                    os.unlink(currname)
            except OSError:
                pass
    os.symlink(certname, hashname)
Esempio n. 3
0
def connect_node(node, configmanager, username=None):
    """Return a console handler for node, proxying when another member owns it.

    If the node's 'collective.manager' attribute names a different collective
    member, a ProxyConsole toward that member is returned; otherwise a local
    ConsoleHandler is created (or reused) per (node, tenant).
    """
    attrs = configmanager.get_node_attributes(node, 'collective.manager')
    manager = attrs.get(node, {}).get('collective.manager', {}).get(
        'value', None)
    myname = collective.get_myname()
    if manager and manager != collective.get_myname():
        # node belongs to another collective member; proxy the console there
        memberinfo = configmodule.get_collective_member(manager)
        return ProxyConsole(node, memberinfo, myname, configmanager, username)
    conskey = (node, configmanager.tenant)
    if conskey not in _handled_consoles:
        _handled_consoles[conskey] = ConsoleHandler(node, configmanager)
    return _handled_consoles[conskey]
Esempio n. 4
0
def get_cluster_list(nodename=None, cfg=None):
    """Return (nodes, domain): the set of cluster member names and DNS domain.

    When nodename has an 'ssh.trustnodes' attribute, only that noderange is
    used; otherwise all known nodes plus collective members (and this host)
    are included, with domain-qualified aliases where applicable.
    """
    if cfg is None:
        cfg = configmanager.ConfigManager(None)
    nodes = None
    if nodename is not None:
        trust = cfg.get_node_attributes(nodename, 'ssh.trustnodes')
        trust = trust.get(nodename, {}).get('ssh.trustnodes',
                                            {}).get('value', None)
        if trust:
            nodes = noderange.NodeRange(trust, cfg).nodes
    autonodes = nodes is None
    if autonodes:
        nodes = set(cfg.list_nodes())
    domain = None
    # iterate over a sorted snapshot since extra names are added to nodes
    for node in list(util.natural_sort(nodes)):
        if domain is None:
            # take the dns.domain of the first node that defines one
            domaininfo = cfg.get_node_attributes(node, 'dns.domain')
            domain = domaininfo.get(node, {}).get('dns.domain',
                                                  {}).get('value', None)
        for alias in get_extra_names(node, cfg):
            nodes.add(alias)
    if autonodes:
        # include collective members and ourselves, plus FQDN variants
        for mgr in configmanager.list_collective():
            nodes.add(mgr)
            if domain and domain not in mgr:
                nodes.add('{0}.{1}'.format(mgr, domain))
        myname = collective.get_myname()
        nodes.add(myname)
        if domain and domain not in myname:
            nodes.add('{0}.{1}'.format(myname, domain))
    return nodes, domain
Esempio n. 5
0
def initialize_root_key(generate):
    """Publish root's SSH public key(s) to the confluent public site tree.

    :param generate: when True and no public key exists under /root/.ssh,
        generate a fresh ed25519 key pair first
    """
    myname = collective.get_myname()
    authorized = glob.glob('/root/.ssh/*.pub')
    if generate and not authorized:
        subprocess.check_call([
            'ssh-keygen', '-t', 'ed25519', '-f', '/root/.ssh/id_ed25519', '-N',
            ''
        ])
        authorized = glob.glob('/root/.ssh/*.pub')
    try:
        os.makedirs('/var/lib/confluent/public/site/ssh', mode=0o755)
        neededuid = os.stat('/etc/confluent').st_uid
        for path in ('/var/lib/confluent',
                     '/var/lib/confluent/public',
                     '/var/lib/confluent/public/site',
                     '/var/lib/confluent/public/site/ssh'):
            os.chown(path, neededuid, -1)
    except OSError as e:
        if e.errno != 17:  # EEXIST: tree already present is fine
            raise
    neededuid = os.stat('/etc/confluent').st_uid
    dest = '/var/lib/confluent/public/site/ssh/{0}.rootpubkey'.format(myname)
    # NOTE(review): every key copies onto the same destination, so only the
    # last one in glob order persists -- matches the original behavior
    for auth in authorized:
        shutil.copy(auth, dest)
        os.chmod(dest, 0o644)
        os.chown(dest, neededuid, -1)
Esempio n. 6
0
def get_switchcreds(configmanager, switches):
    """Build per-switch credential tuples for SNMP/management access.

    Returns a list of (switch, password, user, configmanager) tuples.  The
    SNMP community string wins when set; otherwise the hardware management
    password (default 'public') and user are used.  Switches whose
    'collective.managercandidates' excludes this member are skipped.
    """
    attrs = configmanager.get_node_attributes(
        switches,
        ('secret.hardwaremanagementuser', 'secret.snmpcommunity',
         'secret.hardwaremanagementpassword', 'collective.managercandidates'),
        decrypt=True)
    creds = []
    for switch in switches:
        if not switch:
            continue
        parms = attrs.get(switch, {})
        candidates = parms.get('collective.managercandidates',
                               {}).get('value', None)
        if candidates:
            # only proceed if this collective member is an eligible manager
            eligible = noderange.NodeRange(candidates, configmanager).nodes
            if collective.get_myname() not in eligible:
                continue
        user = None
        password = parms.get('secret.snmpcommunity', {}).get('value', None)
        if not password:
            password = parms.get('secret.hardwaremanagementpassword',
                                 {}).get('value', 'public')
            # normalize empty/falsy user values to None
            user = parms.get('secret.hardwaremanagementuser',
                             {}).get('value', None) or None
        creds.append((switch, password, user, configmanager))
    return creds
Esempio n. 7
0
def _start_tenant_sessions(cfm):
    """Start console sessions for every node this collective member manages.

    Nodes whose 'collective.manager' names a different member are skipped.
    Failures to connect an individual node are logged and do not abort the
    remaining nodes.  Finally registers _nodechange to track node changes.
    """
    nodeattrs = cfm.get_node_attributes(cfm.list_nodes(), 'collective.manager')
    for node in nodeattrs:
        manager = nodeattrs[node].get('collective.manager', {}).get('value',
                                                                    None)
        if manager and collective.get_myname() != manager:
            # owned by another collective member; not ours to connect
            continue
        try:
            connect_node(node, cfm)
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # SystemExit/KeyboardInterrupt; log and continue with other nodes
            _tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
                          event=log.Events.stacktrace)
    cfm.watch_nodecollection(_nodechange)
Esempio n. 8
0
 def check_collective(self, attrvalue):
     """Update locality of this console based on 'collective.manager'.

     Detaches and disconnects when the node is unmanaged in a collective,
     or managed by a different collective member; otherwise marks the
     console as locally handled.
     """
     myc = attrvalue.get(self.node, {}).get('collective.manager', {}).get(
         'value', None)
     if configmodule.list_collective() and not myc:
         # in a collective but no manager assigned: nobody should own this
         self._is_local = False
         self._detach()
         self._disconnect()
         # Bug fix: without returning here, the else branch below would
         # immediately flip _is_local back to True after disconnecting
         return
     if myc and myc != collective.get_myname():
         # Do not do console connect for nodes managed by another
         # confluent collective member
         self._is_local = False
         self._detach()
         self._disconnect()
     else:
         self._is_local = True
Esempio n. 9
0
def initialize_root_key(generate, automation=False):
    """Publish root (or automation) SSH public key(s) to the public site tree.

    :param generate: when True, generate a key pair if appropriate
    :param automation: when True, manage the confluent automation key under
        /etc/confluent/ssh instead of root's personal keys
    """
    myname = collective.get_myname()
    authorized = glob.glob('/root/.ssh/*.pub')
    if generate and not authorized and not automation:
        subprocess.check_call(['ssh-keygen', '-t', 'ed25519', '-f', '/root/.ssh/id_ed25519', '-N', ''])
        authorized = glob.glob('/root/.ssh/*.pub')
    if automation and generate:
        # automation key is passphrase protected and owned by the service uid
        subprocess.check_call(
            ['ssh-keygen', '-t', 'ed25519',
             '-f', '/etc/confluent/ssh/automation', '-N', get_passphrase(),
             '-C', 'Confluent Automation by {}'.format(myname)],
            preexec_fn=normalize_uid)
        authorized = ['/etc/confluent/ssh/automation.pub']
    try:
        os.makedirs('/var/lib/confluent/public/site/ssh', mode=0o755)
        neededuid = os.stat('/etc/confluent').st_uid
        for path in ('/var/lib/confluent',
                     '/var/lib/confluent/public',
                     '/var/lib/confluent/public/site',
                     '/var/lib/confluent/public/site/ssh'):
            os.chown(path, neededuid, -1)
    except OSError as e:
        if e.errno != 17:  # EEXIST: tree already present is fine
            raise
    neededuid = os.stat('/etc/confluent').st_uid
    suffix = 'automationpubkey' if automation else 'rootpubkey'
    dest = '/var/lib/confluent/public/site/ssh/{0}.{1}'.format(myname, suffix)
    # NOTE(review): every key copies onto the same destination, so only the
    # last one in list order persists -- matches the original behavior
    for auth in authorized:
        shutil.copy(auth, dest)
        os.chmod(dest, 0o644)
        os.chown(dest, neededuid, -1)
Esempio n. 10
0
def initialize_ca():
    """Create the confluent SSH certificate authority and publish its pubkey.

    Generates a passphrase-protected ed25519 CA key under /etc/confluent/ssh
    and copies the public half to the public site tree as <myname>.ca.
    """
    ouid = normalize_uid()
    try:
        os.makedirs('/etc/confluent/ssh', mode=0o700)
    except OSError as e:
        if e.errno != 17:  # EEXIST: directory already present is fine
            raise
    finally:
        # restore the effective uid changed by normalize_uid
        os.seteuid(ouid)
    myname = collective.get_myname()
    subprocess.check_call(
        ['ssh-keygen', '-C', '{0} SSH CA'.format(myname), '-t', 'ed25519',
         '-f', '/etc/confluent/ssh/ca', '-N', get_passphrase()],
        preexec_fn=normalize_uid)
    try:
        os.makedirs('/var/lib/confluent/public/site/ssh/', mode=0o755)
    except OSError as e:
        if e.errno != 17:
            raise
    shutil.copy('/etc/confluent/ssh/ca.pub',
                '/var/lib/confluent/public/site/ssh/{0}.ca'.format(myname))
Esempio n. 11
0
def handle_node_request(configmanager,
                        inputdata,
                        operation,
                        pathcomponents,
                        autostrip=True):
    """Route a nodes/noderange API request to the appropriate plugin(s).

    :param configmanager: config manager instance used for attribute lookups
    :param inputdata: raw request payload (wrapped into a message below)
    :param operation: request verb, e.g. "create", "retrieve", "delete"
    :param pathcomponents: request path split into components; element 0 is
        'nodes' or 'noderange' and element 1 the node name or range
    :param autostrip: when True and a single node is addressed, strip the
        per-node envelope from the result
    """
    if log.logfull:
        raise exc.TargetResourceUnavailable(
            'Filesystem full, free up space and restart confluent service')
    iscollection = False
    routespec = None
    if pathcomponents[0] == 'noderange':
        if len(pathcomponents) > 3 and pathcomponents[2] == 'nodes':
            # transform into a normal looking node request
            # this does mean we don't see if it is a valid
            # child, but that's not a goal for the noderange
            # facility anyway
            isnoderange = False
            pathcomponents = pathcomponents[2:]
        elif len(pathcomponents) == 3 and pathcomponents[2] == 'abbreviate':
            return abbreviate_noderange(configmanager, inputdata, operation)
        else:
            isnoderange = True
    else:
        isnoderange = False
    try:
        nodeorrange = pathcomponents[1]
        if not isnoderange and not configmanager.is_node(nodeorrange):
            raise exc.NotFoundException("Invalid Node")
        if isnoderange and not (len(pathcomponents) == 3
                                and pathcomponents[2] == 'abbreviate'):
            try:
                nodes = noderange.NodeRange(nodeorrange, configmanager).nodes
            except Exception as e:
                raise exc.NotFoundException("Invalid Noderange: " + str(e))
        else:
            nodes = (nodeorrange, )
    except IndexError:  # doesn't actually have a long enough path
        # this is enumerating a list of nodes or just empty noderange
        if isnoderange and operation == "retrieve":
            return iterate_collections([])
        elif isnoderange and operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            return create_noderange(inputdata.attribs, configmanager)
        elif isnoderange or operation == "delete":
            raise exc.InvalidArgumentException()
        if operation == "create":
            inputdata = msg.InputAttributes(pathcomponents, inputdata)
            return create_node(inputdata.attribs, configmanager)
        allnodes = list(configmanager.list_nodes())
        try:
            allnodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            # mixed-type node names cannot be compared; plain sort instead
            allnodes.sort()
        return iterate_collections(allnodes)
    if (isnoderange and len(pathcomponents) == 3
            and pathcomponents[2] == 'nodes'):
        # this means that it's a list of relevant nodes
        nodes = list(nodes)
        try:
            nodes.sort(key=noderange.humanify_nodename)
        except TypeError:
            nodes.sort()
        return iterate_collections(nodes)
    if len(pathcomponents) == 2:
        iscollection = True
    else:
        try:
            routespec = nested_lookup(noderesources, pathcomponents[2:])
        except KeyError:
            raise exc.NotFoundException("Invalid element requested")
        if isinstance(routespec, dict):
            iscollection = True
        elif isinstance(routespec, PluginCollection):
            iscollection = False  # it is a collection, but plugin defined
        elif routespec is None:
            raise exc.InvalidArgumentException(
                'Custom interface required for resource')
    if iscollection:
        if operation == "delete":
            return delete_node_collection(pathcomponents, configmanager,
                                          isnoderange)
        elif operation == "retrieve":
            return enumerate_node_collection(pathcomponents, configmanager)
        else:
            raise Exception("TODO here")
    # past this point we are invoking a concrete plugin for the element
    del pathcomponents[0:2]
    passvalues = queue.Queue()
    plugroute = routespec.routeinfo
    msginputdata = msg.get_input_message(pathcomponents, operation, inputdata,
                                         nodes, isnoderange, configmanager)
    if 'handler' in plugroute:  # fixed handler definition, easy enough
        if isinstance(plugroute['handler'], str):
            hfunc = getattr(pluginmap[plugroute['handler']], operation)
        else:
            hfunc = getattr(plugroute['handler'], operation)
        passvalue = hfunc(nodes=nodes,
                          element=pathcomponents,
                          configmanager=configmanager,
                          inputdata=msginputdata)
        if isnoderange:
            return passvalue
        elif isinstance(passvalue, console.Console):
            return [passvalue]
        else:
            return stripnode(passvalue, nodes[0])
    elif 'pluginattrs' in plugroute:
        # the plugin is chosen per node based on attribute values
        nodeattr = configmanager.get_node_attributes(
            nodes, plugroute['pluginattrs'] + ['collective.manager'])
        plugpath = None
        nodesbymanager = {}
        nodesbyhandler = {}
        badcollnodes = []
        for node in nodes:
            for attrname in plugroute['pluginattrs']:
                if attrname in nodeattr[node]:
                    plugpath = nodeattr[node][attrname]['value']
                elif 'default' in plugroute:
                    plugpath = plugroute['default']
            if plugpath in dispatch_plugins:
                cfm.check_quorum()
                manager = nodeattr[node].get('collective.manager',
                                             {}).get('value', None)
                if manager:
                    if collective.get_myname() != manager:
                        # defer this node to its owning collective member
                        if manager not in nodesbymanager:
                            nodesbymanager[manager] = set([node])
                        else:
                            nodesbymanager[manager].add(node)
                        continue
                elif list(cfm.list_collective()):
                    # in a collective but no manager assigned to this node
                    badcollnodes.append(node)
                    continue
            if plugpath:
                try:
                    hfunc = getattr(pluginmap[plugpath], operation)
                except KeyError:
                    # unknown plugin name; report per-node error instead
                    nodesbyhandler[BadPlugin(node, plugpath).error] = [node]
                    continue
                if hfunc in nodesbyhandler:
                    nodesbyhandler[hfunc].append(node)
                else:
                    nodesbyhandler[hfunc] = [node]
        for bn in badcollnodes:
            nodesbyhandler[BadCollective(bn).error] = [bn]
        workers = greenpool.GreenPool()
        numworkers = 0
        # fan out local handlers concurrently, collecting into passvalues
        for hfunc in nodesbyhandler:
            numworkers += 1
            workers.spawn(
                addtoqueue, passvalues, hfunc, {
                    'nodes': nodesbyhandler[hfunc],
                    'element': pathcomponents,
                    'configmanager': configmanager,
                    'inputdata': msginputdata
                })
        # fan out remote dispatches, one per owning collective member
        for manager in nodesbymanager:
            numworkers += 1
            workers.spawn(
                addtoqueue, passvalues, dispatch_request, {
                    'nodes': nodesbymanager[manager],
                    'manager': manager,
                    'element': pathcomponents,
                    'configmanager': configmanager,
                    'inputdata': inputdata,
                    'operation': operation,
                    'isnoderange': isnoderange
                })
        if isnoderange or not autostrip:
            return iterate_queue(numworkers, passvalues)
        else:
            if numworkers > 0:
                return iterate_queue(numworkers, passvalues, nodes[0])
            else:
                raise exc.NotImplementedException()
Esempio n. 12
0
def dispatch_request(nodes, manager, element, configmanager, inputdata,
                     operation, isnoderange):
    """Forward a request for remotely-managed nodes to their collective member.

    Connects over TLS to *manager* on port 13001, sends a msgpack-encoded
    dispatch envelope, then yields deserialized response messages as they
    stream back.  Yields ConfluentResourceUnavailable for every node when
    the peer is unreachable at any point.
    """
    a = configmanager.get_collective_member(manager)
    try:
        remote = socket.create_connection((a['address'], 13001))
        remote.settimeout(180)
        # CERT_NONE skips chain validation; peer trust is established by the
        # fingerprint comparison below instead
        remote = ssl.wrap_socket(remote,
                                 cert_reqs=ssl.CERT_NONE,
                                 keyfile='/etc/confluent/privkey.pem',
                                 certfile='/etc/confluent/srvcert.pem')
    except Exception:
        for node in nodes:
            if a:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} is unreachable'.format(a['name']))
            else:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    '"{0}" is not recognized as a collective member'.format(
                        manager))

        return
    if not util.cert_matches(a['fingerprint'],
                             remote.getpeercert(binary_form=True)):
        raise Exception("Invalid certificate on peer")
    banner = tlvdata.recv(remote)
    vers = banner.split()[2]
    # NOTE(review): pvers is assigned but never used below, and would be
    # unbound for an unrecognized version on Python 3 -- looks like a
    # leftover of serialization-protocol negotiation; confirm before removal
    if vers == b'v0':
        pvers = 2
    elif vers == b'v1':
        pvers = 4
    if sys.version_info[0] < 3:
        pvers = 2
    tlvdata.recv(remote)
    myname = collective.get_myname()
    # b'\x01\x03' prefixes the msgpack payload; presumably a message-type
    # marker for the dispatch protocol -- TODO confirm against the receiver
    dreq = b'\x01\x03' + msgpack.packb(
        {
            'name': myname,
            'nodes': list(nodes),
            'path': element,
            'tenant': configmanager.tenant,
            'operation': operation,
            'inputdata': inputdata,
            'isnoderange': isnoderange
        },
        use_bin_type=False)
    tlvdata.send(remote, {'dispatch': {'name': myname, 'length': len(dreq)}})
    remote.sendall(dreq)
    # responses are framed as an 8-byte big-endian length ('!Q') followed by
    # that many bytes of serialized message; a zero length ends the stream
    while True:
        try:
            rlen = remote.recv(8)
        except Exception:
            for node in nodes:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} went unreachable'.format(a['name']))
            return
        # keep reading until the full 8-byte length prefix has arrived
        while len(rlen) < 8:
            try:
                nlen = remote.recv(8 - len(rlen))
            except Exception:
                nlen = 0
            if not nlen:
                for node in nodes:
                    yield msg.ConfluentResourceUnavailable(
                        node, 'Collective member {0} went unreachable'.format(
                            a['name']))
                return
            rlen += nlen
        rlen = struct.unpack('!Q', rlen)[0]
        if rlen == 0:
            break
        try:
            rsp = remote.recv(rlen)
        except Exception:
            for node in nodes:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} went unreachable'.format(a['name']))
            return
        # keep reading until the advertised payload length has arrived
        while len(rsp) < rlen:
            try:
                nrsp = remote.recv(rlen - len(rsp))
            except Exception:
                nrsp = 0
            if not nrsp:
                for node in nodes:
                    yield msg.ConfluentResourceUnavailable(
                        node, 'Collective member {0} went unreachable'.format(
                            a['name']))
                return
            rsp += nrsp
        # try message deserialization first, fall back to exception decode
        try:
            rsp = msg.msg_deserialize(rsp)
        except Exception:
            rsp = exc.deserialize_exc(rsp)
        if isinstance(rsp, Exception):
            raise rsp
        if not rsp:
            raise Exception(
                'Error in cross-collective serialize/deserialze, see remote logs'
            )
        yield rsp
Esempio n. 13
0
def snoop(handler, byehandler=None, protocol=None, uuidlookup=None):
    """Watch for SSDP notify messages

    The handler shall be called on any service coming online.
    byehandler is called whenever a system advertises that it is departing.
    If no byehandler is specified, byebye messages are ignored.  The handler is
    given (as possible), the mac address, a list of viable sockaddrs to reference
    the peer, and the notification type (e.g.
    'urn:dmtf-org:service:redfish-rest:1')

    :param handler:  A handler for online notifications from network
    :param byehandler: Optional handler for devices going off the network
    :param protocol: accepted but not referenced in this function body
    :param uuidlookup: callable mapping a uuid string to a node name; needed
        to answer confluent M-SEARCH queries
    """
    # Normally, I like using v6/v4 agnostic socket. However, since we are
    # dabbling in multicast wizardry here, such sockets can cause big problems,
    # so we will have two distinct sockets
    tracelog = log.Logger('trace')
    known_peers = set([])
    net6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    # join the SSDP v6 multicast group on every interface
    for ifidx in util.list_interface_indexes():
        v6grp = ssdp6mcast + struct.pack('=I', ifidx)
        net6.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, v6grp)
    net6.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # join the SSDP v4 multicast group once per local address
    for i4 in util.list_ips():
        ssdp4mcast = socket.inet_pton(socket.AF_INET, mcastv4addr) + \
                     socket.inet_aton(i4['addr'])
        try:
            net4.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP,
                            ssdp4mcast)
        except socket.error as e:
            if e.errno != 98:
                # errno 98 can happen if aliased, skip for now
                raise
    net4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    net4.bind(('', 1900))
    net6.bind(('', 1900))
    peerbymacaddress = {}
    while True:
        try:
            newmacs = set([])
            machandlers = {}
            # block up to 60s for traffic on either socket
            r, _, _ = select.select((net4, net6), (), (), 60)
            while r:
                for s in r:
                    (rsp, peer) = s.recvfrom(9000)
                    if rsp[:4] == b'PING':
                        continue
                    rsp = rsp.split(b'\r\n')
                    method, _, _ = rsp[0].split(b' ', 2)
                    if method == b'NOTIFY':
                        # strip any IPv6 scope id from the address
                        ip = peer[0].partition('%')[0]
                        if peer in known_peers:
                            continue
                        # resolve the peer's mac via the neighbor table,
                        # refreshing it once if the ip is not yet known
                        if ip not in neighutil.neightable:
                            neighutil.update_neigh()
                        if ip not in neighutil.neightable:
                            continue
                        mac = neighutil.neightable[ip]
                        known_peers.add(peer)
                        newmacs.add(mac)
                        if mac in peerbymacaddress:
                            peerbymacaddress[mac]['addresses'].append(peer)
                        else:
                            peerbymacaddress[mac] = {
                                'hwaddr': mac,
                                'addresses': [peer],
                            }
                            peerdata = peerbymacaddress[mac]
                            # parse 'Header: value' lines from the notify
                            for headline in rsp[1:]:
                                if not headline:
                                    continue
                                headline = util.stringify(headline)
                                header, _, value = headline.partition(':')
                                header = header.strip()
                                value = value.strip()
                                if header == 'NT':
                                    peerdata['service'] = value
                                elif header == 'NTS':
                                    if value == 'ssdp:byebye':
                                        machandlers[mac] = byehandler
                                    elif value == 'ssdp:alive':
                                        # None: alive messages get no callback
                                        # in this code path (handler disabled)
                                        machandlers[mac] = None  # handler
                    elif method == b'M-SEARCH':
                        if not uuidlookup:
                            continue
                        #ip = peer[0].partition('%')[0]
                        for headline in rsp[1:]:
                            if not headline:
                                continue
                            headline = util.stringify(headline)
                            headline = headline.partition(':')
                            if len(headline) < 3:
                                continue
                            if headline[0] == 'ST' and headline[-1].startswith(
                                    ' urn:xcat.org:service:confluent:'):
                                try:
                                    cfm.check_quorum()
                                except Exception:
                                    continue
                                for query in headline[-1].split('/'):
                                    if query.startswith('uuid='):
                                        curruuid = query.split('=',
                                                               1)[1].lower()
                                        node = uuidlookup(curruuid)
                                        if not node:
                                            break
                                        # Do not bother replying to a node that
                                        # we have no deployment activity
                                        # planned for
                                        cfg = cfm.ConfigManager(None)
                                        cfd = cfg.get_node_attributes(
                                            node, [
                                                'deployment.pendingprofile',
                                                'collective.managercandidates'
                                            ])
                                        if not cfd.get(node, {}).get(
                                                'deployment.pendingprofile',
                                            {}).get('value', None):
                                            break
                                        # only answer if this member is an
                                        # eligible manager for the node
                                        candmgrs = cfd.get(node, {}).get(
                                            'collective.managercandidates',
                                            {}).get('value', None)
                                        if candmgrs:
                                            candmgrs = noderange.NodeRange(
                                                candmgrs, cfg).nodes
                                            if collective.get_myname(
                                            ) not in candmgrs:
                                                break
                                        currtime = time.time()
                                        seconds = int(currtime)
                                        msecs = int(currtime * 1000 % 1000)
                                        reply = 'HTTP/1.1 200 OK\r\nNODENAME: {0}\r\nCURRTIME: {1}\r\nCURRMSECS: {2}\r\n'.format(
                                            node, seconds, msecs)
                                        if '%' in peer[0]:
                                            # link-local peer; report which
                                            # interface the query came in on
                                            iface = peer[0].split('%', 1)[1]
                                            reply += 'MGTIFACE: {0}\r\n'.format(
                                                peer[0].split('%', 1)[1])
                                            ncfg = netutil.get_nic_config(
                                                cfg, node, ifidx=iface)
                                            if ncfg.get(
                                                    'matchesnodename', None):
                                                reply += 'DEFAULTNET: 1\r\n'
                                        elif not netutil.address_is_local(
                                                peer[0]):
                                            continue
                                        if not isinstance(reply, bytes):
                                            reply = reply.encode('utf8')
                                        s.sendto(reply, peer)
                # drain any remaining traffic with a short timeout
                r, _, _ = select.select((net4, net6), (), (), 0.2)
            # invoke the per-mac handlers chosen above (byehandler only;
            # alive entries were stored as None)
            for mac in newmacs:
                thehandler = machandlers.get(mac, None)
                if thehandler:
                    thehandler(peerbymacaddress[mac])
        except Exception:
            tracelog.log(traceback.format_exc(),
                         ltype=log.DataTypes.event,
                         event=log.Events.stacktrace)
Esempio n. 14
0
def dispatch_request(nodes, manager, element, configmanager, inputdata,
                     operation):
    """Forward an API request to the collective member that owns the nodes.

    Connects to *manager* (a collective member name), sends a pickled
    dispatch request, and yields back each response object the remote
    streams to us.  On connection failure, yields a
    ConfluentResourceUnavailable message per node instead.

    Generator of msg.* objects; re-raises any exception the remote side
    pickled into the stream.
    """
    a = configmanager.get_collective_member(manager)
    try:
        # 13001 is the collective dispatch port.  CERT_NONE is deliberate:
        # trust is established below by comparing against the pinned
        # fingerprint from the collective configuration, not by a CA.
        remote = socket.create_connection((a['address'], 13001))
        remote.settimeout(90)
        # NOTE(review): ssl.wrap_socket is deprecated (removed in 3.12);
        # an SSLContext-based equivalent would be the modern form.
        remote = ssl.wrap_socket(remote,
                                 cert_reqs=ssl.CERT_NONE,
                                 keyfile='/etc/confluent/privkey.pem',
                                 certfile='/etc/confluent/srvcert.pem')
    except Exception:
        # Could not reach the manager; report every requested node as
        # unavailable.  'a' is falsy when the manager name was unknown.
        for node in nodes:
            if a:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} is unreachable'.format(a['name']))
            else:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    '"{0}" is not recognized as a collective member'.format(
                        manager))

        return
    # Pin the peer certificate to the stored fingerprint.
    if not util.cert_matches(a['fingerprint'],
                             remote.getpeercert(binary_form=True)):
        raise Exception("Invalid certificate on peer")
    # Drain the two handshake TLVs the server sends on connect.
    tlvdata.recv(remote)
    tlvdata.recv(remote)
    myname = collective.get_myname()
    # NOTE(review): pickle over the wire is only safe because the peer is
    # fingerprint-authenticated above; never relax that check.
    dreq = pickle.dumps({
        'name': myname,
        'nodes': list(nodes),
        'path': element,
        'tenant': configmanager.tenant,
        'operation': operation,
        'inputdata': inputdata
    })
    tlvdata.send(remote, {'dispatch': {'name': myname, 'length': len(dreq)}})
    remote.sendall(dreq)
    # Response framing: an 8-byte big-endian length ('!Q') followed by a
    # pickled payload; a zero length terminates the stream.
    while True:
        try:
            rlen = remote.recv(8)
        except Exception:
            for node in nodes:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} went unreachable'.format(a['name']))
            return
        # recv may return a short read; loop until the full header arrives.
        while len(rlen) < 8:
            try:
                nlen = remote.recv(8 - len(rlen))
            except Exception:
                nlen = 0
            if not nlen:
                # Peer vanished mid-header; surface per-node unavailability.
                for node in nodes:
                    yield msg.ConfluentResourceUnavailable(
                        node, 'Collective member {0} went unreachable'.format(
                            a['name']))
                return
            rlen += nlen
        rlen = struct.unpack('!Q', rlen)[0]
        if rlen == 0:
            break
        try:
            rsp = remote.recv(rlen)
        except Exception:
            for node in nodes:
                yield msg.ConfluentResourceUnavailable(
                    node,
                    'Collective member {0} went unreachable'.format(a['name']))
            return
        # Accumulate until the advertised payload length is fully read.
        while len(rsp) < rlen:
            try:
                nrsp = remote.recv(rlen - len(rsp))
            except Exception:
                nrsp = 0
            if not nrsp:
                for node in nodes:
                    yield msg.ConfluentResourceUnavailable(
                        node, 'Collective member {0} went unreachable'.format(
                            a['name']))
                return
            rsp += nrsp
        rsp = pickle.loads(rsp)
        # The remote encodes failures as pickled exceptions; re-raise so
        # the caller sees the original error.
        if isinstance(rsp, Exception):
            raise rsp
        yield rsp
Esempio n. 15
0
        os.chown('/var/lib/confluent/public/site', neededuid, -1)
        os.chown('/var/lib/confluent/public/site/ssh', neededuid, -1)
    except OSError as e:
        if e.errno != 17:
            raise
    neededuid = os.stat('/etc/confluent').st_uid
    if automation:
        suffix = 'automationpubkey'
    else:
        suffix = 'rootpubkey'
    for auth in authorized:
        shutil.copy(
            auth,
            '/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
                    myname, suffix))
        os.chmod('/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
                myname, suffix), 0o644)
        os.chown('/var/lib/confluent/public/site/ssh/{0}.{1}'.format(
                myname, suffix), neededuid, -1)


def ca_exists():
    """Report whether the confluent SSH certificate authority has been set up."""
    ca_keypath = '/etc/confluent/ssh/ca'
    return os.path.exists(ca_keypath)


# Script entry point: exercise the SSH key machinery end to end.  Ensures
# the root key is initialized, creates the CA if it does not exist yet,
# and prints a signed copy of this host's ed25519 public host key.
if __name__ == '__main__':
    initialize_root_key(True)
    if not ca_exists():
        initialize_ca()
    # Use a context manager so the host key file handle is closed promptly
    # (the original left it open for the GC to reclaim).
    with open('/etc/ssh/ssh_host_ed25519_key.pub') as hostkey:
        print(repr(sign_host_key(hostkey.read(), collective.get_myname())))
Esempio n. 16
0
        os.chown('/var/lib/confluent/public/site/ssh', neededuid, -1)
    except OSError as e:
        if e.errno != 17:
            raise
    neededuid = os.stat('/etc/confluent').st_uid
    for auth in authorized:
        shutil.copy(
            auth,
            '/var/lib/confluent/public/site/ssh/{0}.rootpubkey'.format(myname))
        os.chmod(
            '/var/lib/confluent/public/site/ssh/{0}.rootpubkey'.format(myname),
            0o644)
        os.chown(
            '/var/lib/confluent/public/site/ssh/{0}.rootpubkey'.format(myname),
            neededuid, -1)


def ca_exists():
    """Return True when the confluent SSH CA key file is present on disk."""
    capath = '/etc/confluent/ssh/ca'
    return os.path.exists(capath)


# Script entry point: exercise the SSH key machinery end to end.  Ensures
# the root key is initialized, creates the CA if it does not exist yet,
# and prints a signed copy of this host's ed25519 public host key.
if __name__ == '__main__':
    initialize_root_key(True)
    if not ca_exists():
        initialize_ca()
    # Use a context manager so the host key file handle is closed promptly
    # (the original left it open for the GC to reclaim).
    with open('/etc/ssh/ssh_host_ed25519_key.pub') as hostkey:
        print(repr(sign_host_key(hostkey.read(), collective.get_myname())))
Esempio n. 17
0
def handle_request(env, start_response):
    """WSGI application for node self-service endpoints.

    Authenticates the calling node via the CONFLUENT_NODENAME and
    CONFLUENT_APIKEY headers against the node's crypted.selfapikey
    attribute, then dispatches on PATH_INFO:

      /self/deploycfg     - network/deployment configuration for the node
      /self/sshcert       - sign and return the node's SSH host key
      /self/nodelist      - all node (and collective manager) names
      /self/updatestatus  - advance the node's deployment profile state

    Yields the response body; content type is YAML by default or JSON
    per the Accept header.
    """
    # Cached host locale/keymap/timezone state, shared across requests.
    global currtz
    global keymap
    global currlocale
    global currtzvintage
    configmanager.check_quorum()
    nodename = env.get('HTTP_CONFLUENT_NODENAME', None)
    apikey = env.get('HTTP_CONFLUENT_APIKEY', None)
    if not (nodename and apikey):
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    cfg = configmanager.ConfigManager(None)
    # eak: the stored crypt(3)-style hash of the node's self API key.
    eak = cfg.get_node_attributes(nodename, 'crypted.selfapikey').get(
        nodename, {}).get('crypted.selfapikey', {}).get('hashvalue', None)
    if not eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    # Recover the salt prefix from the stored hash and re-hash the
    # presented key for comparison.
    salt = '$'.join(eak.split('$', 3)[:-1]) + '$'
    if crypt.crypt(apikey, salt) != eak:
        start_response('401 Unauthorized', [])
        yield 'Unauthorized'
        return
    # Content negotiation: YAML by default, JSON on explicit request.
    retype = env.get('HTTP_ACCEPT', 'application/yaml')
    isgeneric = False
    if retype == '*/*':
        isgeneric = True
        retype = 'application/yaml'
    if retype == 'application/yaml':
        dumper = yamldump
    elif retype == 'application/json':
        dumper = json.dumps
    else:
        start_response('406 Not supported', [])
        yield 'Unsupported content type in ACCEPT: ' + retype
        return
    # NOTE(review): reqbody is only bound for non-GET/HEAD requests that
    # carry a body; the sshcert and updatestatus branches below assume
    # the caller POSTed one (otherwise this would raise NameError).
    if env['REQUEST_METHOD'] not in (
            'HEAD', 'GET') and 'CONTENT_LENGTH' in env and int(
                env['CONTENT_LENGTH']) > 0:
        reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH']))
    if env['PATH_INFO'] == '/self/deploycfg':
        # Resolve the NIC configuration either by the management
        # interface index the node reported, or by the server address
        # the node used to reach us.
        if 'HTTP_CONFLUENT_MGTIFACE' in env:
            ncfg = netutil.get_nic_config(cfg,
                                          nodename,
                                          ifidx=env['HTTP_CONFLUENT_MGTIFACE'])
        else:
            # Strip port (and IPv6 brackets) from the forwarded host.
            myip = env.get('HTTP_X_FORWARDED_HOST', None)
            if ']' in myip:
                myip = myip.split(']', 1)[0]
            else:
                myip = myip.split(':', 1)[0]
            myip = myip.replace('[', '').replace(']', '')
            ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip)
        if ncfg['prefix']:
            ncfg['ipv4_netmask'] = netutil.cidr_to_mask(ncfg['prefix'])
        deployinfo = cfg.get_node_attributes(
            nodename, ('deployment.*', 'console.method', 'crypted.*', 'dns.*'))
        deployinfo = deployinfo.get(nodename, {})
        profile = deployinfo.get('deployment.pendingprofile',
                                 {}).get('value', '')
        ncfg['encryptboot'] = deployinfo.get('deployment.encryptboot',
                                             {}).get('value', None)
        # Normalize the "disabled" spellings to None.
        if ncfg['encryptboot'] in ('', 'none'):
            ncfg['encryptboot'] = None
        ncfg['profile'] = profile
        protocol = deployinfo.get('deployment.useinsecureprotocols',
                                  {}).get('value', 'never')
        ncfg['textconsole'] = bool(
            deployinfo.get('console.method', {}).get('value', None))
        if protocol == 'always':
            ncfg['protocol'] = 'http'
        else:
            ncfg['protocol'] = 'https'
        ncfg['rootpassword'] = deployinfo.get('crypted.rootpassword',
                                              {}).get('hashvalue', None)
        ncfg['grubpassword'] = deployinfo.get('crypted.grubpassword',
                                              {}).get('grubhashvalue', None)
        # Reuse cached timezone data for 30 seconds; otherwise refresh
        # locale/keymap/timezone from localectl and timedatectl.
        if currtzvintage and currtzvintage > (time.time() - 30.0):
            ncfg['timezone'] = currtz
        else:
            langinfo = subprocess.check_output(['localectl',
                                                'status']).split(b'\n')
            for line in langinfo:
                line = line.strip()
                if line.startswith(b'System Locale:'):
                    ccurrlocale = line.split(b'=')[-1]
                    if not ccurrlocale:
                        continue
                    if not isinstance(ccurrlocale, str):
                        ccurrlocale = ccurrlocale.decode('utf8')
                    if ccurrlocale == 'n/a':
                        continue
                    currlocale = ccurrlocale
                elif line.startswith(b'VC Keymap:'):
                    ckeymap = line.split(b':')[-1]
                    ckeymap = ckeymap.strip()
                    if not ckeymap:
                        continue
                    if not isinstance(ckeymap, str):
                        ckeymap = ckeymap.decode('utf8')
                    if ckeymap == 'n/a':
                        continue
                    keymap = ckeymap
            tdc = subprocess.check_output(['timedatectl']).split(b'\n')
            for ent in tdc:
                ent = ent.strip()
                if ent.startswith(b'Time zone:'):
                    currtz = ent.split(b': ', 1)[1].split(b'(', 1)[0].strip()
                    if not isinstance(currtz, str):
                        currtz = currtz.decode('utf8')
                    currtzvintage = time.time()
                    ncfg['timezone'] = currtz
                    break
        ncfg['locale'] = currlocale
        ncfg['keymap'] = keymap
        ncfg['nameservers'] = []
        for dns in deployinfo.get('dns.servers', {}).get('value',
                                                         '').split(','):
            ncfg['nameservers'].append(dns)
        dnsdomain = deployinfo.get('dns.domain', {}).get('value', None)
        ncfg['dnsdomain'] = dnsdomain
        start_response('200 OK', (('Content-Type', retype), ))
        yield dumper(ncfg)
    elif env['PATH_INFO'] == '/self/sshcert':
        if not sshutil.ca_exists():
            start_response('500 Unconfigured', ())
            yield 'CA is not configured on this system (run ...)'
            return
        dnsinfo = cfg.get_node_attributes(nodename, ('dns.*'))
        dnsinfo = dnsinfo.get(nodename, {}).get('dns.domain',
                                                {}).get('value', None)
        # Avoid doubling the domain when the node name is already FQDN.
        # NOTE(review): dnsinfo may be None here, and 'None in nodename'
        # raises TypeError -- this path appears to assume dns.domain is
        # set; confirm against callers.
        if dnsinfo in nodename:
            dnsinfo = ''
        cert = sshutil.sign_host_key(reqbody, nodename, [dnsinfo])
        start_response('200 OK', (('Content-Type', 'text/plain'), ))
        yield cert
    elif env['PATH_INFO'] == '/self/nodelist':
        # Full inventory of node names, adding FQDN variants where a
        # dns.domain is configured, plus all collective managers.
        nodes = set(cfg.list_nodes())
        domaininfo = cfg.get_node_attributes(nodes, 'dns.domain')
        for node in list(util.natural_sort(nodes)):
            domain = domaininfo.get(node, {}).get('dns.domain',
                                                  {}).get('value', None)
            if domain and domain not in node:
                nodes.add('{0}.{1}'.format(node, domain))
        # NOTE(review): 'domain' below carries over from the last node of
        # the loop above rather than being looked up per manager -- verify
        # this is intentional.
        for mgr in configmanager.list_collective():
            nodes.add(mgr)
            if domain and domain not in mgr:
                nodes.add('{0}.{1}'.format(mgr, domain))
        myname = collective.get_myname()
        nodes.add(myname)
        if domain and domain not in myname:
            nodes.add('{0}.{1}'.format(myname, domain))
        if isgeneric:
            # Plain-text one-name-per-line for wildcard Accept headers.
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            for node in util.natural_sort(nodes):
                yield node + '\n'
        else:
            start_response('200 OK', (('Content-Type', retype), ))
            yield dumper(sorted(nodes))
    elif env['PATH_INFO'] == '/self/updatestatus':
        # Node reports deployment progress: 'staged' or 'complete'.
        update = yaml.safe_load(reqbody)
        if update['status'] == 'staged':
            targattr = 'deployment.stagedprofile'
        elif update['status'] == 'complete':
            targattr = 'deployment.profile'
        else:
            raise Exception('Unknown update status request')
        currattr = cfg.get_node_attributes(nodename,
                                           'deployment.*').get(nodename, {})
        pending = None
        # 'complete' promotes the staged profile; otherwise promote the
        # pending profile.
        if targattr == 'deployment.profile':
            pending = currattr.get('deployment.stagedprofile',
                                   {}).get('value', '')
        if not pending:
            pending = currattr.get('deployment.pendingprofile',
                                   {}).get('value', '')
        updates = {}
        if pending:
            # Clear the earlier stage markers and record the new state.
            updates['deployment.pendingprofile'] = {'value': ''}
            if targattr == 'deployment.profile':
                updates['deployment.stagedprofile'] = {'value': ''}
            currprof = currattr.get(targattr, {}).get('value', '')
            if currprof != pending:
                updates[targattr] = {'value': pending}
            cfg.set_node_attributes({nodename: updates})
            start_response('200 OK', (('Content-Type', 'text/plain'), ))
            yield 'OK'
        else:
            start_response('500 Error', (('Content-Type', 'text/plain'), ))
            yield 'No pending profile detected, unable to accept status update'
    else:
        start_response('404 Not Found', ())
        yield 'Not found'