Example #1
def aaa_util_reconfigure():
    '''
    Check system initialization, add default rows and update files accordingly
    based on the values in DB
    '''

    global system_initialized
    global default_row_initialized

    if system_initialized == 0:
        rc = system_is_configured()
        if rc is False:
            return

    if default_row_initialized == 0:
        check_for_row_initialization()

    update_ssh_config_file()

    get_source_interface(AAA_TACACS)
    get_source_interface(AAA_RADIUS)
    vlog.info("tacacs_src_interface = %s, radius_src_interface = %s, " \
        "tacacs_source_address = %s, radius_source_address = %s" \
        % (str(tacacs_source_interface), str(radius_source_interface), \
        str(tacacs_source_ip), str(radius_source_ip)))

    # TODO: For now we're calling the functionality to configure
    # TACACS+ PAM config files after all RADIUS config is done
    # This way we can still test RADIUS by not configuring TACACS+
    # To unconfigure TACACS+ for now, just use -
    # no aaa authentication login default
    server_list = get_server_list("default")
    modify_common_auth_access_file(server_list)

    return
Example #2
    def sync_pods(self, pods):
        expected_logical_ports = set()
        pods = pods.get('items', [])
        for pod in pods:
            pod_name = pod['metadata']['name']
            namespace = pod['metadata']['namespace']
            logical_port = "%s_%s" % (namespace, pod_name)
            annotations = pod['metadata']['annotations']
            expected_logical_ports.add(logical_port)

            # We should sync the container port names because there is no
            # guarantee that an endpoint creation event will arrive after
            # all of the pod creation events.
            ip_address = self._get_ip_address_from_annotations(annotations)
            if ip_address:
                self._add_k8s_l4_port_name_cache(pod, ip_address)

        try:
            existing_logical_ports = ovn_nbctl(
                "--data=bare", "--no-heading", "--columns=name", "find",
                "logical_switch_port", "external-ids:pod=true").split()
            existing_logical_ports = set(existing_logical_ports)
        except Exception as e:
            vlog.err("sync_pods: find failed %s" % (str(e)))
            return

        for logical_port in existing_logical_ports - expected_logical_ports:
            try:
                ovn_nbctl("--if-exists", "lsp-del", logical_port)
            except Exception as e:
                vlog.err("sync_pods: failed to delete logical_port %s" %
                         (logical_port))
                continue

            vlog.info("sync_pods: Deleted logical port %s" % (logical_port))
Example #3
    def sync_pods(self, pods):
        expected_logical_ports = set()
        pods = pods.get('items', [])
        for pod in pods:
            pod_name = pod['metadata']['name']
            namespace = pod['metadata']['namespace']
            logical_port = "%s_%s" % (namespace, pod_name)
            expected_logical_ports.add(logical_port)

        try:
            existing_logical_ports = ovn_nbctl("--data=bare", "--no-heading",
                                               "--columns=name", "find",
                                               "logical_switch_port",
                                               "external_id:pod=true").split()
            existing_logical_ports = set(existing_logical_ports)
        except Exception as e:
            vlog.err("sync_pods: find failed %s" % (str(e)))
            return

        for logical_port in existing_logical_ports - expected_logical_ports:
            try:
                ovn_nbctl("--if-exists", "lsp-del", logical_port)
            except Exception as e:
                vlog.err("sync_pods: failed to delete logical_port %s" %
                         (logical_port))
                continue

            vlog.info("sync_pods: Deleted logical port %s" % (logical_port))
Example #4
def _get_api_params():
    ca_certificate = None
    api_token = None
    if not variables.K8S_API_SERVER:
        k8s_api_server = ovs_vsctl("--if-exists", "get", "Open_vSwitch", ".",
                                   "external_ids:k8s-api-server").strip('"')
    else:
        k8s_api_server = variables.K8S_API_SERVER

    if k8s_api_server.startswith("https://"):
        if not path.isfile(CA_CERTIFICATE):
            vlog.info("Going to look for k8s-ca-certificate in OVSDB")
            k8s_ca_crt = ovs_vsctl("--if-exists", "get", "Open_vSwitch",
                                   ".", "external_ids:k8s-ca-certificate"
                                   ).strip('"')
            if k8s_ca_crt:
                k8s_ca_crt = k8s_ca_crt.replace("\\n", "\n")
                with open(CA_CERTIFICATE, 'w+') as ca_file:
                    ca_file.write(k8s_ca_crt)
                ca_certificate = CA_CERTIFICATE
        else:
            ca_certificate = CA_CERTIFICATE

    k8s_api_token = ovs_vsctl("--if-exists", "get", "Open_vSwitch", ".",
                              "external_ids:k8s-api-token").strip('"')
    if k8s_api_token:
        api_token = k8s_api_token

    return ca_certificate, api_token
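
A hypothetical consumer of the values returned above, using the requests library (the URL and endpoint are illustrative, not taken from this code):

import requests

ca_certificate, api_token = _get_api_params()
headers = {}
if api_token:
    headers['Authorization'] = 'Bearer %s' % api_token
# Fall back to default certificate verification when no CA file was written.
pods = requests.get("%s/api/v1/pods" % variables.K8S_API_SERVER,
                    headers=headers,
                    verify=ca_certificate if ca_certificate else True)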
Example #5
    def run(self):
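        # Passive side: if we are listening, accept any pending incoming
        # connection, replacing an already-active one if necessary.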
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None

        if self.rpc:
            backlog = self.rpc.get_backlog()
            self.rpc.run()
            if self.rpc.get_backlog() < backlog:
                # Data previously caught in a queue was successfully sent (or
                # there's an error, which we'll catch below).
                #
                # We don't count data that is successfully sent immediately as
                # activity, because there's a lot of queuing downstream from
                # us, which means that we can push a lot of data into a
                # connection that has stalled and won't ever recover.
                self.reconnect.activity(ovs.timeval.msec())

            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
                self.pick_remote()
        elif self.stream is not None:
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.pick_remote()
                self.stream.close()
                self.stream = None

        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            if self.rpc:
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action is None
Example #6
    def sync_pods(self, pods):
        expected_logical_ports = set()
        pods = pods.get('items', [])
        for pod in pods:
            pod_name = pod['metadata']['name']
            namespace = pod['metadata']['namespace']
            logical_port = "%s_%s" % (namespace, pod_name)
            expected_logical_ports.add(logical_port)

        try:
            existing_logical_ports = ovn_nbctl(
                                "--data=bare", "--no-heading",
                                "--columns=name", "find",
                                "logical_switch_port",
                                "external_id:pod=true").split()
            existing_logical_ports = set(existing_logical_ports)
        except Exception as e:
            vlog.err("sync_pods: find failed %s" % (str(e)))
            return

        for logical_port in existing_logical_ports - expected_logical_ports:
            try:
                ovn_nbctl("--if-exists", "lsp-del", logical_port)
            except Exception as e:
                vlog.err("sync_pods: failed to delete logical_port %s"
                         % (logical_port))
                continue

            vlog.info("sync_pods: Deleted logical port %s"
                      % (logical_port))
Example #7
def get_config(idl_cfg):
    '''
    Walk through the rows in the config table (if any)
    looking for a row with type == startup.

    If found, set global variable saved_config to the content
    of the "config" field in that row.
    '''

    global saved_config

    # Note: You can't tell the difference between the config table not
    #       existing (that is, the configdb is not there) and there simply
    #       being no rows in the config table.
    tbl_found = False
    for ovs_rec in idl_cfg.tables["config"].rows.itervalues():
        tbl_found = True
        if ovs_rec.type:
            if ovs_rec.type == type_startup_config:
                if ovs_rec.config:
                    saved_config = ovs_rec.config
                else:
                    vlog.warn("startup config row does not have config column")
                return

    if not tbl_found:
        vlog.info("No rows found in the config table")
Example #8
def _get_api_params():
    ca_certificate = None
    api_token = None
    if not variables.K8S_API_SERVER:
        k8s_api_server = ovs_vsctl("--if-exists", "get", "Open_vSwitch", ".",
                                   "external_ids:k8s-api-server").strip('"')
    else:
        k8s_api_server = variables.K8S_API_SERVER

    if k8s_api_server.startswith("https://"):
        if not path.isfile(CA_CERTIFICATE):
            vlog.info("Going to look for k8s-ca-certificate in OVSDB")
            k8s_ca_crt = ovs_vsctl("--if-exists", "get", "Open_vSwitch",
                                   ".", "external_ids:k8s-ca-certificate"
                                   ).strip('"')
            if k8s_ca_crt:
                k8s_ca_crt = k8s_ca_crt.replace("\\n", "\n")
                with open(CA_CERTIFICATE, 'w+') as ca_file:
                    ca_file.write(k8s_ca_crt)
                ca_certificate = CA_CERTIFICATE
        else:
            ca_certificate = CA_CERTIFICATE

    k8s_api_token = ovs_vsctl("--if-exists", "get", "Open_vSwitch", ".",
                              "external_ids:k8s-api-token").strip('"')
    if k8s_api_token:
        api_token = k8s_api_token

    return ca_certificate, api_token
Example #9
    def run(self):
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None

        if self.rpc:
            backlog = self.rpc.get_backlog()
            self.rpc.run()
            if self.rpc.get_backlog() < backlog:
                # Data previously caught in a queue was successfully sent (or
                # there's an error, which we'll catch below).
                #
                # We don't count data that is successfully sent immediately as
                # activity, because there's a lot of queuing downstream from
                # us, which means that we can push a lot of data into a
                # connection that has stalled and won't ever recover.
                self.reconnect.activity(ovs.timeval.msec())

            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
        elif self.stream is not None:
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.pick_remote()
                self.stream.close()
                self.stream = None

        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            if self.rpc:
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action is None
Example #10
    def _delete_load_balancer_vip(self, load_balancer, vip):
        # Remove the 'vip' from the 'load_balancer'.
        try:
            ovn_nbctl("remove", "load_balancer", load_balancer, "vips", vip)
            vlog.info("deleted vip %s from %s" % (vip, load_balancer))
        except Exception as e:
            vlog.err("_delete_load_balancer_vip: failed to remove vip %s "
                     "from %s (%s)" % (vip, load_balancer, str(e)))
Example #11
    def _delete_load_balancer_vip(self, load_balancer, vip):
        # Remove the 'vip' from the 'load_balancer'.
        try:
            ovn_nbctl("remove", "load_balancer", load_balancer, "vips", vip)
            vlog.info("deleted vip %s from %s" % (vip, load_balancer))
        except Exception as e:
            vlog.err("_delete_load_balancer_vip: failed to remove vip %s "
                     "from %s (%s)" % (vip, load_balancer, str(e)))
Example #12
def main():
    parser = argparse.ArgumentParser()
    ovs.vlog.add_args(parser)
    args = parser.parse_args()
    ovs.vlog.handle_args(args)

    model = UserGroupDataModel()

    vlog.info("Starting AD sync service")
    while True:
        model.update_from_ad()
        time.sleep(3)
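
The vlog object used by these main() functions is the Open vSwitch Python logger. A minimal setup sketch (the module name "ad_sync" is illustrative):

import ovs.vlog

vlog = ovs.vlog.Vlog("ad_sync")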
Example #13
def main():
    parser = argparse.ArgumentParser()
    ovs.vlog.add_args(parser)
    args = parser.parse_args()
    ovs.vlog.handle_args(args)

    model = UserGroupDataModel()

    vlog.info("Starting AD sync service")
    while True:
        model.update_from_ad()
        time.sleep(3)
Example #14
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--http_listen_port", default=DEFAULT_HTTP_PORT)
    parser.add_argument("--http_listen_addr", default=DEFAULT_HTTP_ADDR)
    ovs.vlog.add_args(parser)
    ovs.daemon.add_args(parser)

    args = parser.parse_args()
    ovs.vlog.handle_args(args)
    ovs.daemon.handle_args(args)

    wsgi_server = Server("API Broker")
    api = ApiApplication()

    tables_model = SimpleDataModel()
    table_collection_handler = CollectionHandler('/tables', tables_model)
    api.register_handler(table_collection_handler)
    table_element_handler = ElementHandler('/tables/([^/]+)', tables_model,
                                           table_collection_handler)
    api.register_handler(table_element_handler)

    rows_model = SimpleDataModel()
    # TODO: scope model per table
    rows_collection_handler = RowCollectionHandler('/tables/([^/]+)/rows',
                                                   rows_model)
    api.register_handler(rows_collection_handler)
    rows_element_handler = RowElementHandler('/tables/([^/]+)/rows/([^/]+)',
                                             rows_model,
                                             rows_collection_handler)
    api.register_handler(rows_element_handler)

    policy_model = PolicyDataModel()
    policy_element_handler = ElementHandler('/policy', policy_model)
    api.register_handler(policy_element_handler)

    ad_model = UserGroupDataModel()

    def ad_update_thread():
        while True:
            ad_model.update_from_ad()  # XXX: blocks eventlet
            time.sleep(3)

    wsgi_server.pool.spawn_n(ad_update_thread)

    ad_row_handler = CollectionHandler('/tables/ad-groups/rows', ad_model)
    api.register_handler(ad_row_handler, 0)
    # Add static tables to model
    tables_model.add_item({'sample': 'schema', 'id': 'ad-groups'}, 'ad-groups')

    vlog.info("Starting congress server")
    wsgi_server.start(api, args.http_listen_port, args.http_listen_addr)
    wsgi_server.wait()
Example #15
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--http_listen_port", default=DEFAULT_HTTP_PORT)
    parser.add_argument("--http_listen_addr", default=DEFAULT_HTTP_ADDR)
    ovs.vlog.add_args(parser)
    ovs.daemon.add_args(parser)

    args = parser.parse_args()
    ovs.vlog.handle_args(args)
    ovs.daemon.handle_args(args)

    wsgi_server = Server("API Broker")
    api = ApiApplication()

    tables_model = SimpleDataModel()
    table_collection_handler = CollectionHandler("/tables", tables_model)
    api.register_handler(table_collection_handler)
    table_element_handler = ElementHandler("/tables/([^/]+)", tables_model, table_collection_handler)
    api.register_handler(table_element_handler)

    rows_model = SimpleDataModel()
    # TODO: scope model per table
    rows_collection_handler = RowCollectionHandler("/tables/([^/]+)/rows", rows_model)
    api.register_handler(rows_collection_handler)
    rows_element_handler = RowElementHandler("/tables/([^/]+)/rows/([^/]+)", rows_model, rows_collection_handler)
    api.register_handler(rows_element_handler)

    policy_model = PolicyDataModel()
    policy_element_handler = ElementHandler("/policy", policy_model)
    api.register_handler(policy_element_handler)

    ad_model = UserGroupDataModel()

    def ad_update_thread():
        while True:
            ad_model.update_from_ad()  # XXX: blocks eventlet
            time.sleep(3)

    wsgi_server.pool.spawn_n(ad_update_thread)

    ad_row_handler = CollectionHandler("/tables/ad-groups/rows", ad_model)
    api.register_handler(ad_row_handler, 0)
    # Add static tables to model
    tables_model.add_item({"sample": "schema", "id": "ad-groups"}, "ad-groups")

    vlog.info("Starting congress server")
    wsgi_server.start(api, args.http_listen_port, args.http_listen_addr)
    wsgi_server.wait()
Example #16
    def run(self):
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None

        if self.rpc:
            self.rpc.run()
            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
        elif self.stream is not None:
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.stream.close()
                self.stream = None

        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            if self.rpc:
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action is None
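
These run() methods are meant to be driven from an outer event loop. A sketch of such a driver, assuming the surrounding class also exposes a wait(poller) method in the style of ovs.jsonrpc.Session:

import ovs.poller

# "session" stands in for an instance of the class shown above.
while True:
    session.run()
    poller = ovs.poller.Poller()
    session.wait(poller)
    poller.block()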
Example #17
    def run(self):
        if self.pstream is not None:
            error, stream = self.pstream.accept()
            if error == 0:
                if self.rpc or self.stream:
                    # XXX rate-limit
                    vlog.info("%s: new connection replacing active "
                              "connection" % self.reconnect.get_name())
                    self.__disconnect()
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(stream)
            elif error != errno.EAGAIN:
                self.reconnect.listen_error(ovs.timeval.msec(), error)
                self.pstream.close()
                self.pstream = None

        if self.rpc:
            self.rpc.run()
            error = self.rpc.get_status()
            if error != 0:
                self.reconnect.disconnected(ovs.timeval.msec(), error)
                self.__disconnect()
        elif self.stream is not None:
            self.stream.run()
            error = self.stream.connect()
            if error == 0:
                self.reconnect.connected(ovs.timeval.msec())
                self.rpc = Connection(self.stream)
                self.stream = None
            elif error != errno.EAGAIN:
                self.reconnect.connect_failed(ovs.timeval.msec(), error)
                self.stream.close()
                self.stream = None

        action = self.reconnect.run(ovs.timeval.msec())
        if action == ovs.reconnect.CONNECT:
            self.__connect()
        elif action == ovs.reconnect.DISCONNECT:
            self.reconnect.disconnected(ovs.timeval.msec(), 0)
            self.__disconnect()
        elif action == ovs.reconnect.PROBE:
            if self.rpc:
                request = Message.create_request("echo", [])
                request.id = "echo"
                self.rpc.send(request)
        else:
            assert action is None
Example #18
    def delete_logical_port(self, event):
        data = event.metadata
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not pod_name:
            vlog.err("absent pod name in pod %s. "
                     "unable to delete logical port" % data)
            return

        try:
            ovn_nbctl("--if-exists", "lsp-del", logical_port)
        except Exception:
            vlog.exception("failure in delete_logical_port: lsp-del")
            return

        vlog.info("deleted logical port %s" % logical_port)
Example #19
    def delete_logical_port(self, event):
        data = event.metadata
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not pod_name:
            vlog.err("absent pod name in pod %s. "
                     "unable to delete logical port" % data)
            return

        try:
            ovn_nbctl("--if-exists", "lsp-del", logical_port)
        except Exception:
            vlog.exception("failure in delete_logical_port: lsp-del")
            return

        vlog.info("deleted logical port %s" % logical_port)
Example #20
    def delete_logical_port(self, event):
        data = event.metadata
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not pod_name:
            vlog.err("absent pod name in pod %s. "
                     "unable to delete logical port" % (data))
            return

        try:
            ovn_nbctl("--if-exists", "lsp-del", logical_port)
        except Exception as e:
            vlog.err("_delete_logical_port: lsp-add (%s)" % (str(e)))
            return

        vlog.info("deleted logical port %s" % (logical_port))
Example #21
def _monitor_daemon(daemon_pid):
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s"
                          % (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if sys.platform != "win32" and os.WCOREDUMP(status):
                    import resource
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None and
                    ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        sys.stdout.write("sleep %f\n" % (
                            (wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = fork_and_wait_for_startup()
                if not daemon_pid:
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
Example #22
def _monitor_daemon(daemon_pid):
    # XXX should log daemon's stderr output at startup time
    # XXX should use setproctitle module if available
    last_restart = None
    while True:
        retval, status = _waitpid(daemon_pid, 0)
        if retval < 0:
            sys.stderr.write("waitpid failed\n")
            sys.exit(1)
        elif retval == daemon_pid:
            status_msg = ("pid %d died, %s" %
                          (daemon_pid, ovs.process.status_msg(status)))

            if _should_restart(status):
                if sys.platform != 'win32' and os.WCOREDUMP(status):
                    # Disable further core dumps to save disk space.
                    try:
                        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
                    except resource.error:
                        vlog.warn("failed to disable core dumps")

                # Throttle restarts to no more than once every 10 seconds.
                if (last_restart is not None
                        and ovs.timeval.msec() < last_restart + 10000):
                    vlog.warn("%s, waiting until 10 seconds since last "
                              "restart" % status_msg)
                    while True:
                        now = ovs.timeval.msec()
                        wakeup = last_restart + 10000
                        if now > wakeup:
                            break
                        sys.stdout.write("sleep %f\n" %
                                         ((wakeup - now) / 1000.0))
                        time.sleep((wakeup - now) / 1000.0)
                last_restart = ovs.timeval.msec()

                vlog.err("%s, restarting" % status_msg)
                daemon_pid = _fork_and_wait_for_startup()
                if not daemon_pid:
                    break
            else:
                vlog.info("%s, exiting" % status_msg)
                sys.exit(0)
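
The _should_restart() helper is not shown in this listing. One plausible policy, stated here only as an assumption about what such a helper might do:

import os
import signal

def _should_restart(status):
    # Hypothetical policy: restart only if the child was killed by a
    # crash-like signal; treat a normal exit as intentional.
    if os.WIFSIGNALED(status):
        return os.WTERMSIG(status) in (signal.SIGABRT, signal.SIGBUS,
                                       signal.SIGFPE, signal.SIGILL,
                                       signal.SIGSEGV)
    return False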
Example #23
def fetch_autoprovision_script(url):
    ret = False
    try:
        cj = cookielib.CookieJar()
        header = {
            'User-Agent': 'OPS-AutoProvision/1.0',
            'OPS-MANUFACTURER': 'OpenSwitch',
            'OPS-VENDOR': 'OpenSwitch'
        }
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        request = urllib2.Request(url, None, header)

        vlog.info("Sending HTTP GET to %s" % url)
        f = opener.open(request)
        data = f.read()
        f.close()
        opener.close()
    except urllib2.HTTPError as e:
        print('HTTPError = ' + str(e.code))
        return ret
Example #24
def get_src_ip_dstn_ns(source_ip, source_interface):
    '''
    Get source ip and destination namespace from source interface
    '''
    dstn_ns = None

    mgmt_ip = l3_utils.get_mgmt_ip(idl)
    vlog.info("mgmt_ip = %s\n" % (mgmt_ip))

    if source_ip is None and source_interface is None:
        return source_ip, dstn_ns

    if source_ip is None:
        source_ip = l3_utils.get_ip_from_interface(idl, source_interface)

    if source_ip != mgmt_ip:
        dstn_ns = vrf_utils.get_vrf_ns_from_name(idl,
                                                 vrf_utils.DEFAULT_VRF_NAME)

    return source_ip, dstn_ns
Example #25
def wait_for_hw_done():
    '''
    Check db to see if h/w initialization has completed.
    If true: return True
    else: return False
    '''

    global idl

    # Check db to see if cfgd has already run.
    if db_is_cur_cfg_set(idl.tables):
        vlog.info("cur_cfg already set...cfgd exiting")
        return terminate()

    # Check db to see if h/w initialization has completed.

    if db_get_hw_done(idl.tables):
        return True
    else:
        # Delay a little before trying again
        sleep(0.2)
        return False
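
A hypothetical caller simply keeps the IDL current and polls until the function reports that hardware initialization is done (the function itself sleeps briefly between attempts):

while not wait_for_hw_done():
    idl.run()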
Example #26
    def delete_logical_port(self, event):
        data = event.metadata
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not pod_name:
            vlog.err("absent pod name in pod %s. "
                     "unable to delete logical port" % data)
            return

        annotations = data['metadata']['annotations']
        ip_address = self._get_ip_address_from_annotations(annotations)
        if ip_address:
            self._delete_k8s_l4_port_name_cache(data, ip_address)

        try:
            ovn_nbctl("--if-exists", "lsp-del", logical_port)
        except Exception:
            vlog.exception("failure in delete_logical_port: lsp-del")
            return

        vlog.info("deleted logical port %s" % logical_port)
Example #27
    def delete_logical_port(self, event):
        data = event.metadata
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not pod_name:
            vlog.err("absent pod name in pod %s. "
                     "unable to delete logical port" % data)
            return

        annotations = data['metadata']['annotations']
        ip_address = self._get_ip_address_from_annotations(annotations)
        if ip_address:
            self._delete_k8s_l4_port_name_cache(data, ip_address)

        try:
            ovn_nbctl("--if-exists", "lsp-del", logical_port)
        except Exception:
            vlog.exception("failure in delete_logical_port: lsp-del")
            return

        vlog.info("deleted logical port %s" % logical_port)
Example #28
    def sync_pods(self, pods):
        expected_logical_ports = set()
        pods = pods.get('items', [])
        for pod in pods:
            pod_name = pod['metadata']['name']
            namespace = pod['metadata']['namespace']
            logical_port = "%s_%s" % (namespace, pod_name)
            annotations = pod['metadata']['annotations']
            expected_logical_ports.add(logical_port)

            # We should sync the container port names because there is no
            # guarantee that an endpoint creation event will arrive after
            # all of the pod creation events.
            ip_address = self._get_ip_address_from_annotations(annotations)
            if ip_address:
                self._add_k8s_l4_port_name_cache(pod, ip_address)

        try:
            existing_logical_ports = ovn_nbctl(
                                "--data=bare", "--no-heading",
                                "--columns=name", "find",
                                "logical_switch_port",
                                "external-ids:pod=true").split()
            existing_logical_ports = set(existing_logical_ports)
        except Exception as e:
            vlog.err("sync_pods: find failed %s" % (str(e)))
            return

        for logical_port in existing_logical_ports - expected_logical_ports:
            try:
                ovn_nbctl("--if-exists", "lsp-del", logical_port)
            except Exception as e:
                vlog.err("sync_pods: failed to delete logical_port %s"
                         % (logical_port))
                continue

            vlog.info("sync_pods: Deleted logical port %s"
                      % (logical_port))
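
The _get_ip_address_from_annotations() helper is assumed rather than shown. A plausible sketch, assuming the "ovn" pod annotation written in Example #35 below and that its ip_address field is stored as "address/prefix":

import json

def _get_ip_address_from_annotations(annotations):
    # Hypothetical helper, sketched as a plain function: pull the IP
    # address out of the "ovn" annotation, or return None if absent.
    ovn_annotation = annotations.get('ovn')
    if not ovn_annotation:
        return None
    try:
        ovn = json.loads(ovn_annotation)
    except ValueError:
        return None
    ip_address_mask = ovn.get('ip_address')
    if not ip_address_mask:
        return None
    return ip_address_mask.split('/')[0]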
Example #29
def push_config_to_db():
    '''
    Take the previously discovered startup configuration and
    push it to the database.
    '''

    global saved_config

    if saved_config is None:
        vlog.info('No saved configuration exists')
    else:
        #OPS_TODO: Change this log msg to the actual push code when available
        vlog.info('Config data found')
        try:
            data = json.loads(saved_config)
        except ValueError as e:
            print("Invalid json from configdb. Exception: %s\n" % e)
            return

        # set up IDL
        manager = OvsdbConnectionManager(settings.get('ovs_remote'),
                                         settings.get('ovs_schema'))
        manager.start()
        manager.idl.run()

        init_seq_no = manager.idl.change_seqno
        while True:
            manager.idl.run()
            if init_seq_no != manager.idl.change_seqno:
                break
            sleep(1)

        # read the schema
        schema = restparser.parseSchema(settings.get('ext_schema'))
        run_config_util = RunConfigUtil(manager.idl, schema)
        run_config_util.write_config_to_db(data)
Example #30
    def update_from_ad(self):
        """Fetch user group info from AD and update model.

        Raises:
            ldap.INVALID_CREDENTIALS
            XXX: probably a bunch of other ldap exceptions
        """
        # TODO(pjb): rewrite to be scalable, robust
        #vlog.dbg('Updating users from AD')
        l = ldap.initialize(LDAP_URI)
        l.simple_bind_s(BIND_USER, BIND_PW)

        ret = l.search_s('cn=Users,%s' % BASE_DN, ldap.SCOPE_SUBTREE,
                         '(&(objectCategory=person)(objectClass=user))')
        user_dns = [(u[1]['sAMAccountName'][0], u[0]) for u in ret]

        users_to_del = set(self.by_user.keys()) - set([u[0] for u in user_dns])
        for user in users_to_del:
            num_groups = len(self.by_user[user])
            vlog.info("User '%s' deleted (was in %s group%s)"
                      % (user, num_groups, '' if num_groups == 1 else 's'))
            ids = self.by_user.pop(user).values()
            for i in ids:
                del self.items[i]

        for user, dn in user_dns:
            filter_ = '(member:1.2.840.113556.1.4.1941:= %s)' % dn
            ret = l.search_s('cn=Users,%s' % BASE_DN, ldap.SCOPE_SUBTREE,
                             filter_)
            new_groups = set([r[1]['cn'][0] for r in ret])

            old_groups = set(self.by_user.get(user, {}).keys())
            membership_to_del = old_groups - new_groups
            membership_to_add = new_groups - old_groups

            for group in membership_to_del:
                id_ = self.by_user[user].pop(group)
                vlog.info("User '%s' removed from group '%s' (%s)"
                          % (user, group, id_))
                del self.items[id_]
            for group in membership_to_add:
                new_id = str(uuid.uuid4())
                self.by_user.setdefault(user, {})[group] = new_id
                vlog.info("User '%s' added to group '%s' (%s)"
                          % (user, group, new_id))
                self.items[new_id] = (user, group)
Example #31
    def update_from_ad(self):
        """Fetch user group info from AD and update model.

        Raises:
            ldap.INVALID_CREDENTIALS
            XXX: probably a bunch of other ldap exceptions
        """
        # TODO: rewrite to be scalable, robust
        #vlog.dbg('Updating users from AD')
        l = ldap.initialize(LDAP_URI)
        l.simple_bind_s(BIND_USER, BIND_PW)

        ret = l.search_s('cn=Users,%s' % BASE_DN, ldap.SCOPE_SUBTREE,
                         '(&(objectCategory=person)(objectClass=user))')
        user_dns = [(u[1]['sAMAccountName'][0], u[0]) for u in ret]

        users_to_del = set(self.by_user.keys()) - set([u[0] for u in user_dns])
        for user in users_to_del:
            num_groups = len(self.by_user[user])
            vlog.info("User '%s' deleted (was in %s group%s)" %
                      (user, num_groups, '' if num_groups == 1 else 's'))
            ids = self.by_user.pop(user).values()
            for i in ids:
                del self.items[i]

        for user, dn in user_dns:
            filter_ = '(member:1.2.840.113556.1.4.1941:= %s)' % dn
            ret = l.search_s('cn=Users,%s' % BASE_DN, ldap.SCOPE_SUBTREE,
                             filter_)
            new_groups = set([r[1]['cn'][0] for r in ret])

            old_groups = set(self.by_user.get(user, {}).keys())
            membership_to_del = old_groups - new_groups
            membership_to_add = new_groups - old_groups

            for group in membership_to_del:
                id_ = self.by_user[user].pop(group)
                vlog.info("User '%s' removed from group '%s' (%s)" %
                          (user, group, id_))
                del self.items[id_]
            for group in membership_to_add:
                new_id = str(uuid.uuid4())
                self.by_user.setdefault(user, {})[group] = new_id
                vlog.info("User '%s' added to group '%s' (%s)" %
                          (user, group, new_id))
                self.items[new_id] = (user, group)
Example #32
    def sync_services(self, services):
        # For all the services, we will populate the below lists with
        # IP:port that act as VIP in the OVN load-balancers.
        tcp_nodeport_services = []
        udp_nodeport_services = []
        tcp_services = []
        udp_services = []
        services = services.get('items', [])
        for service in services:
            service_type = service['spec'].get('type')
            if service_type != "ClusterIP" and service_type != "NodePort":
                continue

            service_ip = service['spec'].get('clusterIP')
            if not service_ip:
                continue

            service_ports = service['spec'].get('ports')
            if not service_ports:
                continue

            external_ips = service['spec'].get('externalIPs')

            for service_port in service_ports:
                if service_type == "NodePort":
                    port = service_port.get('nodePort')
                else:
                    port = service_port.get('port')

                if not port:
                    continue

                protocol = service_port.get('protocol', 'TCP')

                if service_type == "NodePort":
                    physical_gateway_ips = self._get_physical_gateway_ips()
                    for gateway_ip in physical_gateway_ips:
                        key = "%s:%s" % (gateway_ip, port)
                        if protocol == "TCP":
                            tcp_nodeport_services.append(key)
                        else:
                            udp_nodeport_services.append(key)
                elif service_type == "ClusterIP":
                    key = "%s:%s" % (service_ip, port)
                    if protocol == "TCP":
                        tcp_services.append(key)
                    else:
                        udp_services.append(key)

                if external_ips:
                    for external_ip in external_ips:
                        key = "%s:%s" % (external_ip, port)
                        if protocol == "TCP":
                            tcp_nodeport_services.append(key)
                        else:
                            udp_nodeport_services.append(key)

        # For each of the OVN load-balancer, if the VIP that exists in
        # the load balancer is not seen in current k8s services, we
        # delete it.
        load_balancers = {
            variables.K8S_CLUSTER_LB_TCP: tcp_services,
            variables.K8S_CLUSTER_LB_UDP: udp_services,
            variables.K8S_NS_LB_TCP: tcp_nodeport_services,
            variables.K8S_NS_LB_UDP: udp_nodeport_services
        }

        for load_balancer, k8s_services in load_balancers.items():
            vips = self._get_load_balancer_vips(load_balancer)
            if not vips:
                continue

            for vip in vips:
                if vip not in k8s_services:
                    vip = "\"" + vip + "\""
                    try:
                        ovn_nbctl("remove", "load_balancer", load_balancer,
                                  "vips", vip)
                        vlog.info("sync_services: deleted vip %s from %s" %
                                  (vip, load_balancer))
                    except Exception as e:
                        vlog.err("sync_services: failed to remove vip %s"
                                 "from %s (%s)" % (vip, load_balancer, str(e)))
Example #33
    def create_logical_port(self, event):
        data = event.metadata
        logical_switch = data['spec']['nodeName']
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not logical_switch or not pod_name:
            vlog.err("absent node name or pod name in pod %s. "
                     "Not creating logical port" % (data))
            return

        (gateway_ip, mask) = self._get_switch_gateway_ip(logical_switch)
        if not gateway_ip or not mask:
            vlog.err("_create_logical_port: failed to get gateway_ip")
            return

        try:
            ovn_nbctl("--", "--if-exists", "lsp-del", logical_port, "--",
                      "lsp-add", logical_switch, logical_port, "--",
                      "lsp-set-addresses", logical_port, "dynamic")
        except Exception as e:
            vlog.err("_create_logical_port: lsp-add (%s)" % (str(e)))
            return

        # We wait for a maximum of 3 seconds to get the dynamic addresses in
        # intervals of 0.1 seconds.
        addresses = ""
        counter = 30
        while counter != 0:
            try:
                ret = ovn_nbctl("get", "logical_switch_port", logical_port,
                                "dynamic_addresses")
                addresses = ast.literal_eval(ret)
                if len(addresses):
                    break
            except Exception as e:
                vlog.err("_create_logical_port: get dynamic_addresses (%s)" %
                         (str(e)))

            time.sleep(0.1)
            counter = counter - 1

        if not len(addresses):
            vlog.err("_create_logical_port: failed to get addresses after "
                     "multiple retries.")
            return

        # dynamic_addresses is a single string of the form "<mac> <ip>",
        # e.g. "0a:00:00:00:00:01 192.168.1.5".
        (mac_address, ip_address) = addresses.split()

        namespace = data['metadata']['namespace']
        pod_name = data['metadata']['name']

        ip_address_mask = "%s/%s" % (ip_address, mask)

        annotation = {
            'ip_address': ip_address_mask,
            'mac_address': mac_address,
            'gateway_ip': gateway_ip
        }

        try:
            kubernetes.set_pod_annotation(variables.K8S_API_SERVER, namespace,
                                          pod_name, "ovn", str(annotation))
        except Exception as e:
            vlog.err(
                "_create_logical_port: failed to annotate addresses (%s)" %
                (str(e)))
            return

        vlog.info("created logical port %s" % (logical_port))
Example #34
    def sync_services(self, services):
        # For all the services, we will populate the below lists with
        # IP:port that act as VIP in the OVN load-balancers.
        tcp_nodeport_services = []
        udp_nodeport_services = []
        tcp_services = []
        udp_services = []
        services = services.get('items', [])
        for service in services:
            service_type = service['spec'].get('type')
            if service_type != "ClusterIP" and service_type != "NodePort":
                continue

            service_ip = service['spec'].get('clusterIP')
            if not service_ip:
                continue

            service_ports = service['spec'].get('ports')
            if not service_ports:
                continue

            external_ips = service['spec'].get('externalIPs')

            for service_port in service_ports:
                if service_type == "NodePort":
                    port = service_port.get('nodePort')
                else:
                    port = service_port.get('port')

                if not port:
                    continue

                protocol = service_port.get('protocol', 'TCP')

                if service_type == "NodePort":
                    physical_gateway_ips = self._get_physical_gateway_ips()
                    for gateway_ip in physical_gateway_ips:
                        key = "%s:%s" % (gateway_ip, port)
                        if protocol == "TCP":
                            tcp_nodeport_services.append(key)
                        else:
                            udp_nodeport_services.append(key)
                elif service_type == "ClusterIP":
                    key = "%s:%s" % (service_ip, port)
                    if protocol == "TCP":
                        tcp_services.append(key)
                    else:
                        udp_services.append(key)

                if external_ips:
                    for external_ip in external_ips:
                        key = "%s:%s" % (external_ip, port)
                        if protocol == "TCP":
                            tcp_nodeport_services.append(key)
                        else:
                            udp_nodeport_services.append(key)

        # For each of the OVN load-balancer, if the VIP that exists in
        # the load balancer is not seen in current k8s services, we
        # delete it.
        load_balancers = {variables.K8S_CLUSTER_LB_TCP: tcp_services,
                          variables.K8S_CLUSTER_LB_UDP: udp_services,
                          variables.K8S_NS_LB_TCP: tcp_nodeport_services,
                          variables.K8S_NS_LB_UDP: udp_nodeport_services}

        for load_balancer, k8s_services in load_balancers.items():
            vips = self._get_load_balancer_vips(load_balancer)
            if not vips:
                continue

            for vip in vips:
                if vip not in k8s_services:
                    vip = "\"" + vip + "\""
                    try:
                        ovn_nbctl("remove", "load_balancer", load_balancer,
                                  "vips", vip)
                        vlog.info("sync_services: deleted vip %s from %s"
                                  % (vip, load_balancer))
                    except Exception as e:
                        vlog.err("sync_services: failed to remove vip %s"
                                 "from %s (%s)" % (vip, load_balancer, str(e)))
Example #35
    def create_logical_port(self, event):
        data = event.metadata
        logical_switch = data['spec']['nodeName']
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not logical_switch or not pod_name:
            vlog.err("absent node name or pod name in pod %s. "
                     "Not creating logical port" % (data))
            return

        (gateway_ip, mask) = self._get_switch_gateway_ip(logical_switch)
        if not gateway_ip or not mask:
            vlog.err("_create_logical_port: failed to get gateway_ip")
            return

        try:
            ovn_nbctl("--wait=sb", "--", "--may-exist", "lsp-add",
                      logical_switch, logical_port, "--", "lsp-set-addresses",
                      logical_port, "dynamic", "--", "set",
                      "logical_switch_port", logical_port,
                      "external-ids:namespace=" + namespace,
                      "external-ids:pod=true")
        except Exception as e:
            vlog.err("_create_logical_port: lsp-add (%s)" % (str(e)))
            return

        try:
            ret = ovn_nbctl("get", "logical_switch_port", logical_port,
                            "dynamic_addresses")
            addresses = ast.literal_eval(ret)
        except Exception as e:
            vlog.err("_create_logical_port: get dynamic_addresses (%s)"
                     % (str(e)))
            return

        if not len(addresses):
            vlog.err("_create_logical_port: failed to get dynamic address")
            return

        (mac_address, ip_address) = addresses.split()

        namespace = data['metadata']['namespace']
        pod_name = data['metadata']['name']

        ip_address_mask = "%s/%s" % (ip_address, mask)

        annotation = {'ip_address': str(ip_address_mask),
                      'mac_address': str(mac_address),
                      'gateway_ip': str(gateway_ip)}

        try:
            kubernetes.set_pod_annotation(variables.K8S_API_SERVER,
                                          namespace, pod_name,
                                          "ovn", json.dumps(annotation))
        except Exception as e:
            vlog.err("_create_logical_port: failed to annotate addresses (%s)"
                     % (str(e)))
            return

        vlog.info("created logical port %s" % (logical_port))

        self._add_k8s_l4_port_name_cache(data, ip_address)
Example #36
def main():
    global idl
    argv = sys.argv
    n_args = 2
    if len(argv) != n_args:
        print("Requires %d arguments but %d provided \n" % (n_args, len(argv)))
        return

    # Locate default config if it exists
    schema_helper = ovs.db.idl.SchemaHelper(location=OVS_SCHEMA)
    schema_helper.register_columns(SYSTEM_TABLE, ["cur_cfg"])
    schema_helper.register_columns(SYSTEM_TABLE, ["auto_provisioning_status"])

    idl = ovs.db.idl.Idl(DEF_DB, schema_helper)

    seqno = idl.change_seqno  # Sequence number when last processed the db

    # Wait until the ovsdb sync up.
    while True:
        idl.run()
        if (seqno != idl.change_seqno):
            break

        poller = ovs.poller.Poller()
        idl.wait(poller)
        poller.block()

    wait_for_config_complete(idl)

    if os.path.exists(AUTOPROVISION_STATUS_FILE):
        vlog.info("Autoprovisioning already completed")
        update_autoprovision_status(AAA_TRUE_FLAG, argv[1])
        idl.close()
        return

    if (fetch_autoprovision_script(argv[1]) is False):
        print("Downloading autoprovisioning script failed")
        idl.close()
        return

    sys.stdout.flush()

    ret = 1
    if os.path.exists(AUTOPROVISION_SCRIPT):
        ret = os.system('chmod +x ' + AUTOPROVISION_SCRIPT)
        ret = os.system(AUTOPROVISION_SCRIPT)
        if (ret == 0):
            try:
                autoprovision_file = open(AUTOPROVISION_STATUS_FILE, "w")
                autoprovision_file.close()
            except IOError as e:
                print "Creating autoprovision status file, I/O error({0}): \
                      {1}".format(e.errno, e.strerror)
                idl.close()
                return
            except Exception as e:
                print('Creating autoprovision status file,'\
                    'generic exception: ' + str(e))
                idl.close()
                return

            update_autoprovision_status(AAA_TRUE_FLAG, argv[1])
            vlog.info("Autoprovision status: performed = %s URL =  %s" %
                      (AAA_TRUE_FLAG, argv[1]))
        else:
            vlog.err(
                "Error, executing autoprovision script returned error %d" %
                ret)
Example #37
def get_server_list(session_type):

    server_list = []
    global global_tacacs_passkey, global_tacacs_timeout, global_tacacs_auth
    global global_radius_passkey, global_radius_timeout, global_radius_auth
    global global_radius_retries
    global local_group, authentication_group_list
    global authorization_group_list, none_group

    group_list = ""
    group_list_authorization = ""

    if local_group is None or none_group is None:
        for group_rec in idl.tables[AAA_SERVER_GROUP_TABLE].rows.itervalues():
            if group_rec.group_name == AAA_LOCAL:
                local_group = group_rec
            elif group_rec.group_name == AAA_NONE:
                none_group = group_rec

    for ovs_rec in idl.tables[SYSTEM_TABLE].rows.itervalues():
        if ovs_rec.aaa:
            for key, value in ovs_rec.aaa.iteritems():
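                # Compare each global AAA setting against the previously
                # cached value; log a transition event and update the cache
                # whenever it changes.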
                if key == GBL_TACACS_SERVER_TIMEOUT:
                    if (global_tacacs_timeout != value):
                        type_str = "global default " + key
                        event = global_tacacs_timeout + " => " + value
                        log_event("TACACS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_tacacs_timeout = value
                if key == GBL_TACACS_SERVER_AUTH_TYPE:
                    if (global_tacacs_auth != value):
                        type_str = "global default " + key
                        event = global_tacacs_auth + " => " + value
                        log_event("TACACS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_tacacs_auth = value
                if key == GBL_TACACS_SERVER_PASSKEY:
                    if (global_tacacs_passkey != value):
                        type_str = "global default " + key
                        event = global_tacacs_passkey + " => " + value
                        log_event("TACACS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_tacacs_passkey = value
                if key == GBL_RADIUS_SERVER_TIMEOUT:
                    if (global_radius_timeout != value):
                        type_str = "global default " + key
                        event = global_radius_timeout + " => " + value
                        log_event("RADIUS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_radius_timeout = value
                if key == GBL_RADIUS_SERVER_AUTH_TYPE:
                    if (global_radius_auth != value):
                        type_str = "global default " + key
                        event = global_radius_auth + " => " + value
                        log_event("RADIUS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_radius_auth = value
                if key == GBL_RADIUS_SERVER_PASSKEY:
                    if (global_radius_passkey != value):
                        type_str = "global default " + key
                        event = global_radius_passkey + " => " + value
                        log_event("RADIUS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_radius_passkey = value
                elif key == GBL_RADIUS_SERVER_RETRIES:
                    if (global_radius_retries != value):
                        type_str = "global default " + key
                        event = global_radius_retries + " => " + value
                        log_event("RADIUS", ["type", type_str],
                                  ["action", "update"], ["event", event])
                        global_radius_retries = value
                elif key == AAA_FAIL_THROUGH:
                    global AAA_FAIL_THROUGH_ENABLED
                    old = "disabled"
                    new = "disabled"

                    if value == AAA_TRUE_FLAG:
                        if not AAA_FAIL_THROUGH_ENABLED:
                            new = "enabled"
                            AAA_FAIL_THROUGH_ENABLED = True
                    else:
                        if AAA_FAIL_THROUGH_ENABLED:
                            old = "enabled"
                            AAA_FAIL_THROUGH_ENABLED = False
                    if old != new:
                        log_event("AAA_CONFIG", ["type", "Fail-through"],
                                  ["event", new])

    for ovs_rec in idl.tables[AAA_SERVER_GROUP_PRIO_TABLE].rows.itervalues():
        if ovs_rec.session_type != session_type:
            continue

        size = len(ovs_rec.authentication_group_prios)
        if (size == 1 and
                ovs_rec.authentication_group_prios.keys()[0] == local_group):
            vlog.info("AAA: Default local authentication configured\n")
        else:
            for prio, group in sorted(
                    ovs_rec.authentication_group_prios.iteritems()):
                if group is None:
                    continue

                group_list = group_list + group.group_name + " "
                vlog.info(
                    "AAA Authentication: group_name = %s, group_type = %s\n" %
                    (group.group_name, group.group_type))

                server_table = ""
                if group.group_type == AAA_TACACS_PLUS:
                    server_table = TACACS_SERVER_TABLE
                elif group.group_type == AAA_RADIUS:
                    server_table = RADIUS_SERVER_TABLE
                elif group.group_type == AAA_LOCAL:
                    server_list.append((0, group.group_type))

                if server_table in (RADIUS_SERVER_TABLE, TACACS_SERVER_TABLE):
                    group_server_dict = {}

                    if group.group_name in (AAA_RADIUS, AAA_TACACS_PLUS):
                        for server in idl.tables[server_table].rows.itervalues(
                        ):
                            vlog.info(
                                "AAA: Server %s length = %s group = %s group_prio = %s default_prio = %s\n"
                                % (server.address, len(
                                    server.group), server.group[0].group_name,
                                   server.user_group_priority[0],
                                   server.default_group_priority))
                            group_server_dict[
                                server.default_group_priority] = server
                    else:
                        for server in idl.tables[server_table].rows.itervalues(
                        ):
                            vlog.info(
                                "AAA: Server %s length = %s group = %s group_prio = %s default_prio = %s\n"
                                % (server.address, len(
                                    server.group), server.group[0].group_name,
                                   server.user_group_priority[0],
                                   server.default_group_priority))
                            if server.group[0] == group or server.group[
                                    1] == group:
                                group_server_dict[
                                    server.user_group_priority[0]] = server

                    vlog.info("AAA: group_server_dict = %s\n" %
                              (group_server_dict))

                    for server_prio, server in sorted(
                            group_server_dict.iteritems()):
                        server_list.append((server, group.group_type))

        size_author = len(ovs_rec.authorization_group_prios)
        if (size_author == 1 and
                ovs_rec.authorization_group_prios.keys()[0] == none_group):
            vlog.info("AAA: Default none authorization configured\n")
        else:
            for prio, group in sorted(
                    ovs_rec.authorization_group_prios.iteritems()):
                if group is None:
                    continue

                group_list_authorization = group_list_authorization + group.group_name + " "
                vlog.info(
                    "AAA Authorization: group_name = %s, group_type = %s\n" %
                    (group.group_name, group.group_type))

    group_list = group_list.strip()
    if group_list != authentication_group_list:
        event = authentication_group_list + " => " + group_list
        log_event("AAA_CONFIG",
                  ["type", "Authentication server group priority list"],
                  ["event", event])
        authentication_group_list = group_list

    group_list_authorization = group_list_authorization.strip()
    if group_list_authorization != authorization_group_list:
        event = authorization_group_list + " => " + group_list_authorization
        log_event("AAA_CONFIG",
                  ["type", "Authorization server group priority list"],
                  ["event", event])
        authorization_group_list = group_list_authorization

    return server_list
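
For reference, the list returned above holds (server, group_type) tuples, with the placeholder 0 standing in for the local fallback instead of an OVSDB server row. A minimal consumption sketch, assuming only the constants and row attributes already used in the example (the loop itself is illustrative, not part of the source):

servers = get_server_list("default")
for server, group_type in servers:
    if group_type == AAA_LOCAL:
        # Local entry: no OVSDB row, just the placeholder 0.
        vlog.info("local authentication fallback configured")
    else:
        # Remote TACACS+ or RADIUS entry backed by an OVSDB server row.
        vlog.info("remote %s server %s" % (group_type, server.address))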
Beispiel #38
0
def modify_common_auth_access_file(server_list):
    '''
    modify common-auth-access file, based on RADIUS, TACACS+ and local
    values set in the DB
    '''
    global tacacs_source_interface
    global tacacs_source_ip
    global tacacs_dstn_ns

    global radius_source_interface
    global radius_source_ip
    global radius_dstn_ns

    tacacs_src_ip = None
    tacacs_dstn_ns_old = tacacs_dstn_ns

    radius_src_ip = None
    radius_dstn_ns_old = radius_dstn_ns

    tacacs_src_ip, tacacs_dstn_ns = \
        get_src_ip_dstn_ns(tacacs_source_ip, tacacs_source_interface)

    radius_src_ip, radius_dstn_ns = \
        get_src_ip_dstn_ns(radius_source_ip, radius_source_interface)

    if tacacs_dstn_ns != tacacs_dstn_ns_old:
        log_event("TACACS", ["type", "Destination Namespace"],
                  ["action", "update"], ["event", tacacs_dstn_ns])

    if radius_dstn_ns != radius_dstn_ns_old:
        log_event("RADIUS", ["type", "Destination Namespace"],
                  ["action", "update"], ["event", radius_dstn_ns])

    vlog.info("tacacs_src_interface = %s, radius_src_interface = %s," \
        " tacacs_src_ip = %s, tacacs_dstn_ns = %s, radius_src_ip = %s," \
        " radius_dstn_ns = %s" \
        % (str(tacacs_source_interface), str(radius_source_interface), \
        str(tacacs_src_ip), str(tacacs_dstn_ns), \
         str(radius_src_ip), str(radius_dstn_ns)))

    global global_tacacs_passkey, global_tacacs_timeout, global_tacacs_auth
    global global_radius_passkey, global_radius_timeout, global_radius_auth, global_radius_retries
    vlog.info("AAA: server_list = %s\n" % server_list)
    if not server_list:
        vlog.info("AAA: server_list is empty. Adding default local")

        server_list.append((0, AAA_LOCAL))

    file_header = "# THIS IS AN AUTO-GENERATED FILE\n" \
                  "#\n" \
                  "# /etc/pam.d/common-auth- authentication settings common to all services\n" \
                  "# This file is included from other service-specific PAM config files,\n" \
                  "# and should contain a list of the authentication modules that define\n" \
                  "# the central authentication scheme for use on the system\n" \
                  "# (e.g., /etc/shadow, LDAP, Kerberos, etc.). The default is to use the\n" \
                  "# traditional Unix authentication mechanisms.\n" \
                  "#\n" \
                  "# here are the per-package modules (the \"Primary\" block)\n"

    file_footer = "#\n" \
                  "# here's the fallback if no module succeeds\n" \
                  "auth    requisite                       pam_deny.so\n" \
                  "# prime the stack with a positive return value if there isn't one already;\n" \
                  "# this avoids us returning an error just because nothing sets a success code\n" \
                  "# since the modules above will each just jump around\n" \
                  "auth    required                        pam_permit.so\n" \
                  "# and here are more per-package modules (the \"Additional\" block)\n"

    file_body = ""

    common_auth_access_filename = PAM_ETC_CONFIG_DIR + "common-auth-access"

    pam_server_list_str = "server list -"

    with open(common_auth_access_filename, "w") as f:

        # Write the file header
        f.write(file_header)

        # Build the auth lines for the server list
        PAM_CONTROL_VALUE = "[success=done new_authtok_reqd=done default=ignore auth_err=die]"
        if AAA_FAIL_THROUGH_ENABLED:
            # The following is exactly the same as saying: PAM_CONTROL_VALUE = "sufficient"
            PAM_CONTROL_VALUE = "[success=done new_authtok_reqd=done default=ignore]"

        for server, server_type in server_list[:-1]:
            auth_line = ""
            if server_type == AAA_LOCAL:
                auth_line = "auth\t" + PAM_CONTROL_VALUE + "\t" + PAM_LOCAL_MODULE + " nullok\n"
                pam_server_list_str = pam_server_list_str + " local"
            elif server_type == AAA_TACACS_PLUS:
                ip_address = server.address
                tcp_port = server.tcp_port[0]
                if len(server.timeout) == 0:
                    timeout = global_tacacs_timeout
                else:
                    timeout = server.timeout[0]
                if len(server.auth_type) == 0:
                    auth_type = global_tacacs_auth
                else:
                    auth_type = server.auth_type[0]
                if len(server.passkey) == 0:
                    passkey = global_tacacs_passkey
                else:
                    passkey = server.passkey[0]

                if tacacs_dstn_ns is not None:
                    auth_line = "auth\t" + PAM_CONTROL_VALUE + "\t" + PAM_TACACS_MODULE + "\tdebug server=" + ip_address + \
                        ":" + str(tcp_port) + " secret=" + str(passkey) + " login="******" timeout=" + str(timeout) + \
                        " dstn_namespace=" + tacacs_dstn_ns +  " source_ip=" + str(tacacs_src_ip) + " \n"
                else:
                    auth_line = "auth\t" + PAM_CONTROL_VALUE + "\t" + PAM_TACACS_MODULE + "\tdebug server=" + ip_address + \
                        ":" + str(tcp_port) + " secret=" + str(passkey) + " login="******" timeout=" + str(timeout) + "\n"
                pam_server_list_str = pam_server_list_str + " " + ip_address

            elif server_type == AAA_RADIUS:
                ip_address = server.address
                udp_port = server.udp_port[0]
                if len(server.timeout) == 0:
                    timeout = global_radius_timeout
                else:
                    timeout = server.timeout[0]
                if len(server.auth_type) == 0:
                    auth_type = global_radius_auth
                else:
                    auth_type = server.auth_type[0]
                if len(server.passkey) == 0:
                    passkey = global_radius_passkey
                else:
                    passkey = server.passkey[0]
                if len(server.retries) == 0:
                    retries = global_radius_retries
                else:
                    retries = server.retries[0]
                if radius_dstn_ns is not None:
                    auth_line = "auth\t" + PAM_CONTROL_VALUE + "\t" + PAM_RADIUS_MODULE + "\tdebug server=" + ip_address + ":" + \
                        str(udp_port) + " secret=" + str(passkey) + " login="******" retry=" + str(retries) + \
                        " timeout=" + str(timeout) + " dstn_namespace=" + radius_dstn_ns +  " source_ip=" + str(radius_src_ip) + "\n"
                else:
                    auth_line = "auth\t" + PAM_CONTROL_VALUE + "\t" + PAM_RADIUS_MODULE + "\tdebug server=" + ip_address + ":" + \
                        str(udp_port) + " secret=" + str(passkey) + " login="******" retry=" + str(retries) + \
                        " timeout=" + str(timeout) + "\n"

            file_body += auth_line

        # Write the last entry outside the loop: it uses
        # "[success=1 default=ignore]" so a successful authentication skips
        # the pam_deny.so line in file_footer and lands on pam_permit.so.
        server = server_list[-1][0]
        server_type = server_list[-1][1]
        auth_line = ""
        if server_type == AAA_LOCAL:
            auth_line = "auth\t[success=1 default=ignore]\t" + PAM_LOCAL_MODULE + " nullok\n"
            pam_server_list_str = pam_server_list_str + " local"
        elif server_type == AAA_TACACS_PLUS:
            ip_address = server.address
            tcp_port = server.tcp_port[0]
            if len(server.timeout) == 0:
                timeout = global_tacacs_timeout
            else:
                timeout = server.timeout[0]
            if len(server.auth_type) == 0:
                auth_type = global_tacacs_auth
            else:
                auth_type = server.auth_type[0]
            if len(server.passkey) == 0:
                passkey = global_tacacs_passkey
            else:
                passkey = server.passkey[0]

            if tacacs_dstn_ns is not None:
                auth_line = "auth\t[success=1 default=ignore]\t" + PAM_TACACS_MODULE + "\tdebug server=" + ip_address + \
                    " secret=" + str(passkey) + " login="******" timeout=" + str(timeout) + \
                    " dstn_namespace=" + tacacs_dstn_ns +  " source_ip=" + str(tacacs_src_ip) + " \n"
            else:
                auth_line = "auth\t[success=1 default=ignore]\t" + PAM_TACACS_MODULE + "\tdebug server=" + ip_address + \
                    " secret=" + str(passkey) + " login="******" timeout=" + str(timeout) + " \n"

            pam_server_list_str = pam_server_list_str + " " + ip_address

        elif server_type == AAA_RADIUS:
            ip_address = server.address
            udp_port = server.udp_port[0]
            if len(server.timeout) == 0:
                timeout = global_radius_timeout
            else:
                timeout = server.timeout[0]
            if len(server.auth_type) == 0:
                auth_type = global_radius_auth
            else:
                auth_type = server.auth_type[0]
            if len(server.passkey) == 0:
                passkey = global_radius_passkey
            else:
                passkey = server.passkey[0]
            if len(server.retries) == 0:
                retries = global_radius_retries
            else:
                retries = server.retries[0]

            if radius_dstn_ns is not None:
                auth_line = "auth\t[success=1 default=ignore]\t"  + PAM_RADIUS_MODULE + "\tdebug server=" + ip_address + \
                    ":" +  str(udp_port) + " secret=" + str(passkey) + " login="******" retry=" + str(retries) + \
                    " timeout=" + str(timeout) + " dstn_namespace=" + radius_dstn_ns +  " source_ip=" + str(radius_src_ip) + "\n"
            else:
                auth_line = "auth\t[success=1 default=ignore]\t"  + PAM_RADIUS_MODULE + "\tdebug server=" + ip_address + \
                    ":" +  str(udp_port) + " secret=" + str(passkey) + " login="******" retry=" + str(retries) + \
                    " timeout=" + str(timeout) + "\n"

        file_body += auth_line

        # Write the PAM configurations for authentication
        f.write(file_body)

        # Write the file footer
        f.write(file_footer)
    log_event("AAA_CONFIG", ["type", "Authentication PAM configuration file"],
              ["event", pam_server_list_str])
Beispiel #39
0
    def create_logical_port(self, event):
        data = event.metadata
        logical_switch = data['spec']['nodeName']
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not logical_switch or not pod_name:
            vlog.err("absent node name or pod name in pod %s. "
                     "Not creating logical port" % (data))
            return

        (gateway_ip, mask) = self._get_switch_gateway_ip(logical_switch)
        if not gateway_ip or not mask:
            vlog.err("_create_logical_port: failed to get gateway_ip")
            return

        try:
            ovn_nbctl("--", "--may-exist", "lsp-add", logical_switch,
                      logical_port, "--", "lsp-set-addresses",
                      logical_port, "dynamic", "--", "set",
                      "logical_switch_port", logical_port,
                      "external-ids:namespace=" + namespace,
                      "external-ids:pod=true")
        except Exception as e:
            vlog.err("_create_logical_port: lsp-add (%s)" % (str(e)))
            return

        # We wait for a maximum of 3 seconds to get the dynamic addresses in
        # intervals of 0.1 seconds.
        addresses = ""
        counter = 30
        while counter != 0:
            try:
                ret = ovn_nbctl("get", "logical_switch_port", logical_port,
                                "dynamic_addresses")
                addresses = ast.literal_eval(ret)
                if len(addresses):
                    break
            except Exception as e:
                vlog.err("_create_logical_port: get dynamic_addresses (%s)"
                         % (str(e)))

            time.sleep(0.1)
            counter = counter - 1

        if not len(addresses):
            vlog.err("_create_logical_port: failed to get addresses after "
                     "multiple retries.")
            return

        (mac_address, ip_address) = addresses.split()

        namespace = data['metadata']['namespace']
        pod_name = data['metadata']['name']

        ip_address_mask = "%s/%s" % (ip_address, mask)

        annotation = {'ip_address': ip_address_mask,
                      'mac_address': mac_address,
                      'gateway_ip': gateway_ip}

        try:
            kubernetes.set_pod_annotation(variables.K8S_API_SERVER,
                                          namespace, pod_name,
                                          "ovn", str(annotation))
        except Exception as e:
            vlog.err("_create_logical_port: failed to annotate addresses (%s)"
                     % (str(e)))
            return

        vlog.info("created logical port %s" % (logical_port))
Beispiel #40
0
def update_mgmt_intf_status(hostname, dns_1, dns_2, domainname):
    global idl
    retry_count = 50
    while retry_count > 0:
        status_data = {}
        is_update = False

        for ovs_rec in idl.tables[SYSTEM_TABLE].rows.itervalues():
            if ovs_rec.mgmt_intf_status:
                status_data = ovs_rec.mgmt_intf_status
                break

        dhcp_hostname = status_data.get(MGMT_INTF_KEY_DHCP_HOSTNAME,
                                        MGMT_INTF_NULL_VAL)
        ovsdb_dns1 = status_data.get(MGMT_INTF_KEY_DNS1, DEFAULT_IPV4)
        ovsdb_dns2 = status_data.get(MGMT_INTF_KEY_DNS2, DEFAULT_IPV4)
        dhcp_domainname = status_data.get(MGMT_INTF_KEY_DHCP_DOMAIN_NAME,
                                          MGMT_INTF_NULL_VAL)
        # Storing hostname as received from host-name option from dhcp server.
        # This may or may not be resolvable by the local domain
        # Eg: www.nx1,www,etc.
        if dhcp_hostname != hostname:
            if hostname != MGMT_INTF_NULL_VAL:
                status_data[MGMT_INTF_KEY_DHCP_HOSTNAME] = hostname
            else:
                del status_data[MGMT_INTF_KEY_DHCP_HOSTNAME]
            is_update = True
        # Storing domainname as received from the domain-name option from the
        # dhcp server. This would be used to resolve various hosts during DNS
        # queries. Eg: slave.example.com, internal.example.com, example.com, etc.
        if domainname != dhcp_domainname:
            if domainname != MGMT_INTF_NULL_VAL:
                status_data[MGMT_INTF_KEY_DHCP_DOMAIN_NAME] = domainname
            else:
                del status_data[MGMT_INTF_KEY_DHCP_DOMAIN_NAME]
            is_update = True

        if dns_1 != 'None':
            if dns_1 != ovsdb_dns1:
                status_data[MGMT_INTF_KEY_DNS1] = dns_1
                is_update = True
        elif ovsdb_dns1 != DEFAULT_IPV4:
            mgmt_intf_clear_dns_conf()
            del status_data[MGMT_INTF_KEY_DNS1]
            is_update = True

        if dns_2 != 'None':
            if dns_2 != ovsdb_dns2:
                status_data[MGMT_INTF_KEY_DNS2] = dns_2
                is_update = True
        elif ovsdb_dns2 != DEFAULT_IPV4:
            del status_data[MGMT_INTF_KEY_DNS2]
            is_update = True

        # Create the transaction
        if is_update:
            txn = ovs.db.idl.Transaction(idl)
            ovs_rec.verify("mgmt_intf_status")
            setattr(ovs_rec, "mgmt_intf_status", status_data)
            status = txn.commit_block()
            if status == "try again":
                vlog.info("ovsdb not in syn.Hence retrying the transaction")
                retry_count = retry_count - 1
                continue
            if status != "success" and status != "unchanged":
                vlog.err("Updating DHCP hostname status column failed \
                        with status %s" % (status))
                return False

        return True
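
A rough calling sketch for the function above (values invented). Note that an unset DNS entry is passed as the literal string 'None', not the None object, and that the function returns True once the status column is in sync:

if not update_mgmt_intf_status("switch1", "10.0.0.53", "None", "example.com"):
    vlog.err("failed to update mgmt_intf_status")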
Beispiel #41
0
    def create_logical_port(self, event):
        data = event.metadata
        logical_switch = data['spec']['nodeName']
        pod_name = data['metadata']['name']
        namespace = data['metadata']['namespace']
        logical_port = "%s_%s" % (namespace, pod_name)
        if not logical_switch or not pod_name:
            vlog.err("absent node name or pod name in pod %s. "
                     "Not creating logical port" % (data))
            return

        (gateway_ip, mask) = self._get_switch_gateway_ip(logical_switch)
        if not gateway_ip or not mask:
            vlog.err("_create_logical_port: failed to get gateway_ip")
            return

        try:
            ovn_nbctl("--wait=sb", "--", "--may-exist", "lsp-add",
                      logical_switch, logical_port, "--", "lsp-set-addresses",
                      logical_port, "dynamic", "--", "set",
                      "logical_switch_port", logical_port,
                      "external-ids:namespace=" + namespace,
                      "external-ids:pod=true")
        except Exception as e:
            vlog.err("_create_logical_port: lsp-add (%s)" % (str(e)))
            return

        try:
            ret = ovn_nbctl("get", "logical_switch_port", logical_port,
                            "dynamic_addresses")
            addresses = ast.literal_eval(ret)
        except Exception as e:
            vlog.err("_create_logical_port: get dynamic_addresses (%s)" %
                     (str(e)))
            return

        if not len(addresses):
            vlog.err("_create_logical_port: failed to get dynamic address")
            return

        (mac_address, ip_address) = addresses.split()

        namespace = data['metadata']['namespace']
        pod_name = data['metadata']['name']

        ip_address_mask = "%s/%s" % (ip_address, mask)

        annotation = {
            'ip_address': ip_address_mask,
            'mac_address': mac_address,
            'gateway_ip': gateway_ip
        }

        try:
            kubernetes.set_pod_annotation(variables.K8S_API_SERVER, namespace,
                                          pod_name, "ovn", str(annotation))
        except Exception as e:
            vlog.err(
                "_create_logical_port: failed to annotate addresses (%s)" %
                (str(e)))
            return

        vlog.info("created logical port %s" % (logical_port))

        self._add_k8s_l4_port_name_cache(data, ip_address)
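
Compared with the variant in Beispiel #39, this version passes --wait=sb to ovn-nbctl, so the dynamic address is expected to be populated by the time the follow-up "get dynamic_addresses" call runs, which is why it does not need the 3-second polling loop. A minimal driver sketch (the event wrapper class, the watcher instance, and pod_json are assumptions for illustration, not part of the source):

class PodAddedEvent(object):
    def __init__(self, metadata):
        # Raw pod object as delivered by the Kubernetes watch stream.
        self.metadata = metadata

# 'watcher' stands for whatever object this method is bound to in the daemon.
watcher.create_logical_port(PodAddedEvent(pod_json))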