Example #1
    def get(self) -> Munch:
        asset = self.BASE_ASSET.copy()
        self._verify_asset_fields(asset)

        # Serialize allocation across processes: the whole read-modify-write
        # cycle on the assets file happens under one exclusive file lock.
        with utils.file_lock_context(self._lock_file):
            assets_in_use = self._get_assets_in_use_from_assets_file()

            self._fill_allocated_ips_and_bridges_from_assets_file(
                assets_in_use)
            self._fill_allocated_ips_and_bridges_by_interface()
            self._fill_virsh_allocated_ips_and_bridges()

            self._override_ip_networks_values_if_not_free(asset)
            self._override_network_bridges_values_if_not_free(asset)

            self._taken_assets.add(str(asset))
            assets_in_use.append(asset)

            self._dump_all_assets_in_use_to_assets_file(assets_in_use)

        self._allocated_bridges.clear()
        self._allocated_ips_objects.clear()

        log.info("Taken asset: %s", asset)
        return Munch.fromDict(asset)
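
Every example in this collection serializes libvirt/terraform access through utils.file_lock_context. The helper's implementation is not shown here; a minimal sketch of what such a context manager might look like, assuming an fcntl-based advisory lock and a hypothetical default lock path:

import fcntl
from contextlib import contextmanager

@contextmanager
def file_lock_context(lock_file="/tmp/test_infra.lock"):
    # Open (or create) the lock file and take an exclusive advisory lock;
    # flock() blocks until any other holder releases it.
    with open(lock_file, "w") as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(lock, fcntl.LOCK_UN)  # let the next waiter proceed
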
Example #2
    def list_leases(self, network_name):
        with utils.file_lock_context():
            net = self.libvirt_connection.networkLookupByName(network_name)
            # TODO: getting the information from the XML dump until the dhcp-leases bug is fixed
            leases = net.DHCPLeases()
            hosts = self._get_hosts_from_network(net)
            leased_ips = {ls["ipaddr"] for ls in leases}
            return leases + [h for h in hosts if h["ipaddr"] not in leased_ips]
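
The merge at the end of list_leases treats live DHCP leases as authoritative and appends XML-defined hosts only when their address is not already leased. A standalone illustration with hypothetical data:

leases = [{"ipaddr": "192.168.122.10", "hostname": "master-0"}]
hosts = [
    {"ipaddr": "192.168.122.10", "hostname": "master-0"},  # already leased, skipped
    {"ipaddr": "192.168.122.11", "hostname": "worker-0"},  # only in the XML, kept
]
leased_ips = {ls["ipaddr"] for ls in leases}
merged = leases + [h for h in hosts if h["ipaddr"] not in leased_ips]
assert [e["hostname"] for e in merged] == ["master-0", "worker-0"]
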
Example #3
def get_network_leases(network_name):
    warnings.warn(
        "get_network_leases is deprecated. "
        "Use LibvirtController.get_network_leases instead.",
        DeprecationWarning,
    )
    with utils.file_lock_context():
        net = conn.networkLookupByName(network_name)
        # TODO: getting the information from the XML dump until the dhcp-leases bug is fixed
        leases = net.DHCPLeases()
        hosts = _get_hosts_from_network(net)
        leased_ips = {ls["ipaddr"] for ls in leases}
        return leases + [h for h in hosts if h["ipaddr"] not in leased_ips]
Example #4
def clean_virsh_resources(skip_list, resource_filter):
    with utils.file_lock_context():
        _clean_domains(skip_list, resource_filter)
        _clean_pools(skip_list, resource_filter)
        _clean_networks(skip_list, resource_filter)
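
The _clean_* helpers themselves are not shown in these examples. A hedged sketch of what _clean_domains might do with libvirt-python, assuming resource_filter is matched as a substring of the domain name, skip_list holds names to preserve, and conn is a module-level libvirt connection as in Example #3:

import libvirt

conn = libvirt.open("qemu:///system")  # assumed module-level connection

def _clean_domains(skip_list, resource_filter):
    for domain in conn.listAllDomains():
        name = domain.name()
        if name in skip_list or resource_filter not in name:
            continue
        if domain.isActive():
            domain.destroy()  # force power-off the running VM
        domain.undefine()     # drop its libvirt definition
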
Example #5
    def release_all(self):
        with utils.file_lock_context(self._lock_file):
            assets_in_use = self._get_assets_in_use_from_assets_file()
            self._remove_taken_assets_from_all_assets_in_use(assets_in_use)
            self._dump_all_assets_in_use_to_assets_file(assets_in_use)
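
Taken together, Examples #1 and #5 form an acquire/release pair: get() reserves IP networks and bridges under the file lock and records them in the assets file, while release_all() removes everything this instance took. A hedged usage sketch (NetworkAssets and deploy_cluster are hypothetical names, not from these examples):

assets = NetworkAssets()   # hypothetical allocator exposing get()/release_all()
asset = assets.get()       # reserves free IP networks and bridges
try:
    deploy_cluster(asset)  # hypothetical consumer of the reserved asset
finally:
    assets.release_all()   # always return the reservations to the pool
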
Example #6
    def start_install_and_wait_for_installed(self):
        cluster_name = self.config.day1_cluster_name
        # Running twice as a workaround for an issue with terraform not spawning a new node on first apply.
        for _ in range(2):
            with utils.file_lock_context():
                utils.run_command(
                    f"make _apply_terraform CLUSTER_NAME={cluster_name} PLATFORM={consts.Platforms.BARE_METAL}"
                )
        time.sleep(5)

        num_nodes_to_wait = self.config.day2_workers_count
        installed_status = consts.NodesStatus.DAY2_INSTALLED

        tfvars = utils.get_tfvars(self.config.tf_folder)
        tf_network_name = tfvars["libvirt_network_name"]

        config = TerraformConfig()
        config.nodes_count = num_nodes_to_wait
        libvirt_controller = LibvirtController(config=config,
                                               entity_config=ClusterConfig())
        libvirt_controller.wait_till_nodes_are_ready(
            network_name=tf_network_name)

        # Wait for day2 nodes
        waiting.wait(
            lambda: self.are_libvirt_nodes_in_cluster_hosts(),
            timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
            sleep_seconds=10,
            waiting_for="Nodes to be registered in inventory service",
        )
        self.set_nodes_hostnames_if_needed(tf_network_name)
        wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.config.cluster_id,
            nodes_count=self.config.day2_workers_count,
            statuses=[consts.NodesStatus.KNOWN],
            interval=30,
        )

        # Start day2 nodes installation
        log.info("Start installing all known nodes in the cluster %s",
                 self.config.cluster_id)
        kubeconfig = utils.get_kubeconfig_path(self.config.day1_cluster_name)
        ocp_ready_nodes = self.get_ocp_cluster_ready_nodes_num(kubeconfig)
        hosts = self.api_client.get_cluster_hosts(self.config.cluster_id)
        # Trigger day2 installation for every host still in the "known" state
        for host in hosts:
            if host["status"] == "known":
                self.api_client.install_day2_host(self.config.infra_env_id,
                                                  host["id"])

        log.info(
            "Waiting until all nodes of cluster %s have been installed (reached added-to-existing-cluster)",
            self.config.cluster_id,
        )
        wait_till_all_hosts_are_in_status(
            client=self.api_client,
            cluster_id=self.config.cluster_id,
            nodes_count=num_nodes_to_wait,
            statuses=[installed_status],
            interval=30,
        )

        log.info(
            "Waiting until the installed nodes have actually been added to the OCP cluster"
        )
        waiting.wait(
            lambda: self.wait_nodes_join_ocp_cluster(
                ocp_ready_nodes, self.config.day2_workers_count, kubeconfig),
            timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
            sleep_seconds=30,
            waiting_for="Day2 nodes to be added to OCP cluster",
            expected_exceptions=Exception,
        )
        log.info("%d worker nodes were successfully added to OCP cluster",
                 self.config.day2_workers_count)
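
wait_nodes_join_ocp_cluster is polled above until the day2 workers show up in OCP; its body is not included in these examples. A plausible sketch, assuming it simply compares the current Ready-node count against the pre-day2 baseline (get_ocp_cluster_ready_nodes_num appears in Examples #6 and #7):

def wait_nodes_join_ocp_cluster(ready_nodes_before, num_new_nodes, kubeconfig):
    # Assumed joining condition: the baseline nodes plus all new workers
    # report Ready in the OCP cluster.
    return get_ocp_cluster_ready_nodes_num(kubeconfig) >= (
        ready_nodes_before + num_new_nodes)
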
Example #7
def day2_nodes_flow(client, terraform_cluster_dir_prefix, tf_folder, cluster,
                    has_ipv_6, num_worker_nodes, install_cluster_flag,
                    day2_type_flag, with_static_network_config,
                    base_cluster_name):
    tf_network_name, total_num_nodes = get_network_num_nodes_from_tf(tf_folder)

    # Running twice as a workaround for an issue with terraform not spawning a new node on first apply.
    for _ in range(2):
        with utils.file_lock_context():
            utils.run_command(
                f'make _apply_terraform CLUSTER_NAME={terraform_cluster_dir_prefix} PLATFORM={consts.Platforms.BARE_METAL}'
            )
    time.sleep(5)

    if day2_type_flag == "ocp":
        num_nodes_to_wait = total_num_nodes
        installed_status = consts.NodesStatus.INSTALLED
    else:
        num_nodes_to_wait = num_worker_nodes
        installed_status = consts.NodesStatus.DAY2_INSTALLED

    wait_till_nodes_are_ready(nodes_count=num_nodes_to_wait,
                              network_name=tf_network_name)

    waiting.wait(
        lambda: are_libvirt_nodes_in_cluster_hosts(client, cluster.id,
                                                   num_nodes_to_wait),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )

    set_nodes_hostnames_if_needed(client, tf_folder,
                                  with_static_network_config, has_ipv_6,
                                  tf_network_name, cluster.id)

    wait_till_all_hosts_are_in_status(
        client=client,
        cluster_id=cluster.id,
        nodes_count=num_worker_nodes,
        statuses=[consts.NodesStatus.KNOWN],
        interval=30,
    )

    if install_cluster_flag:
        log.info("Start installing all known nodes in the cluster %s",
                 cluster.id)
        kubeconfig = utils.get_kubeconfig_path(base_cluster_name)
        ocp_orig_ready_nodes = get_ocp_cluster_ready_nodes_num(kubeconfig)
        hosts = client.get_cluster_hosts(cluster.id)
        # Trigger day2 installation for every host still in the "known" state
        for host in hosts:
            if host["status"] == "known":
                client.install_day2_host(cluster.id, host["id"])

        log.info(
            "Start waiting until all nodes of cluster %s have been installed "
            "(reached added-to-existing-cluster state)", cluster.id)
        wait_till_all_hosts_are_in_status(
            client=client,
            cluster_id=cluster.id,
            nodes_count=num_nodes_to_wait,
            statuses=[installed_status],
            interval=30,
        )

        log.info(
            "Start waiting until the installed nodes have actually been added to the OCP cluster"
        )
        waiting.wait(
            lambda: wait_nodes_join_ocp_cluster(
                ocp_orig_ready_nodes, num_worker_nodes, day2_type_flag,
                kubeconfig),
            timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
            sleep_seconds=30,
            waiting_for="Day2 nodes to be added to OCP cluster",
            expected_exceptions=Exception,
        )
        log.info("%d worker nodes were successfully added to OCP cluster",
                 num_worker_nodes)
Example #8
def create_nodes(tf):
    log.info("Start running terraform")
    with utils.file_lock_context():
        return tf.apply()
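
create_nodes expects a tf object exposing apply(); nothing else about it appears in these examples. A minimal hypothetical stand-in that shells out to the terraform CLI:

import subprocess

class TerraformWrapper:
    """Hypothetical stand-in for the tf object passed to create_nodes."""

    def __init__(self, working_dir):
        self.working_dir = working_dir

    def apply(self):
        # -auto-approve keeps the run non-interactive, in the spirit of the
        # "make _apply_terraform" invocations in Examples #6 and #7.
        return subprocess.run(
            ["terraform", "apply", "-auto-approve"],
            cwd=self.working_dir,
            check=True,
        )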