Example no. 1
    def get(self) -> Munch:
        asset = self.BASE_ASSET.copy()
        self._verify_asset_fields(asset)

        with utils.file_lock_context(self._lock_file):
            assets_in_use = self._get_assets_in_use_from_assets_file()

            # Collect every IP and bridge already claimed: from the assets
            # file, from live host interfaces, and from virsh.
            self._fill_allocated_ips_and_bridges_from_assets_file(assets_in_use)
            self._fill_allocated_ips_and_bridges_by_interface()
            self._fill_virsh_allocated_ips_and_bridges()

            # Swap out any requested values that are already taken.
            self._override_ip_networks_values_if_not_free(asset)
            self._override_network_bridges_values_if_not_free(asset)

            self._taken_assets.add(str(asset))
            assets_in_use.append(asset)

            self._dump_all_assets_in_use_to_assets_file(assets_in_use)

        self._allocated_bridges.clear()
        self._allocated_ips_objects.clear()

        logging.info("Taken asset: %s", asset)
        return Munch.fromDict(asset)
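Every example on this page serializes access to shared resources through utils.file_lock_context. The helper itself does not appear here; below is a minimal sketch of its likely shape, assuming an fcntl.flock-based context manager (the default lock path is a guess, not taken from the source):

import fcntl
from contextlib import contextmanager

@contextmanager
def file_lock_context(lock_file="/tmp/test_infra.lock"):  # default path is an assumption
    # Take an exclusive advisory lock; flock() blocks until any other
    # holder releases it, so concurrent get()/release() calls serialize.
    with open(lock_file, "w") as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(lock, fcntl.LOCK_UN)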
Example no. 2
    def get(self) -> Munch:
        asset = self.BASE_ASSET.copy()
        self._verify_asset_fields(asset)

        with utils.file_lock_context(self._lock_file):
            # Load the shared assets file if it exists; start empty otherwise.
            all_assets = []
            if os.path.isfile(self._assets_file):
                with open(self._assets_file) as fp:
                    all_assets = json.load(fp)

            self._fill_allocated_ips_and_bridges_from_assets_file(all_assets)
            self._fill_allocated_ips_and_bridges_by_interface()
            self._fill_virsh_allocated_ips_and_bridges()

            self._override_ip_networks_values_if_not_free(asset)
            self._override_network_bridges_values_if_not_free(asset)

            self._taken_assets.add(str(asset))
            all_assets.append(asset)

            with open(self._assets_file, "w") as fp:
                json.dump(all_assets, fp)

        self._allocated_bridges.clear()
        self._allocated_ips_objects.clear()

        logging.info("Taken asset: %s", asset)
        return Munch.fromDict(asset)
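Example no. 2 inlines the JSON bookkeeping that Example no. 1 delegates to helper methods. Judging from the inline version, those helpers plausibly reduce to the following (the method names come from Example no. 1; the bodies are a sketch, not the project's actual code):

    def _get_assets_in_use_from_assets_file(self):
        # A missing file simply means no assets are in use yet.
        if not os.path.isfile(self._assets_file):
            return []
        with open(self._assets_file) as fp:
            return json.load(fp)

    def _dump_all_assets_in_use_to_assets_file(self, assets_in_use):
        # The caller already holds the file lock, so a plain overwrite is safe.
        with open(self._assets_file, "w") as fp:
            json.dump(assets_in_use, fp)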
Example no. 3
    def release(self, assets):
        logging.info("Returning %d assets", len(assets))
        logging.debug("Assets to return: %s", assets)
        with utils.file_lock_context(self.lock_file):
            with open(self.assets_file) as _file:
                all_assets = json.load(_file)
            all_assets.extend([Munch.toDict(asset) for asset in assets])
            with open(self.assets_file, "w") as _file:
                json.dump(all_assets, _file)
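A typical round trip pairs get() with release(): take an asset under the lock, use it, and append it back to the shared file when finished. A hypothetical caller might look like this (assets_manager and deploy_with are illustrative names, not from the source):

asset = assets_manager.get()
try:
    deploy_with(asset)  # hypothetical consumer of the asset
finally:
    # Return the asset so other processes can reuse its IPs and bridges.
    assets_manager.release([asset])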
Example no. 4
    def release_all(self):
        with utils.file_lock_context(self._lock_file):
            with open(self._assets_file) as fp:
                all_assets = json.load(fp)

            self._remove_taken_assets_from_all_assets(all_assets)

            with open(self._assets_file, "w") as fp:
                json.dump(all_assets, fp)
Example no. 5
def _try_to_delete_nodes(tf_folder):
    log.info('Start running terraform delete')
    with utils.file_lock_context():
        utils.run_command_with_output(f'cd {tf_folder} && '
                                      'terraform destroy '
                                      '-auto-approve '
                                      '-input=false '
                                      '-state=terraform.tfstate '
                                      '-state-out=terraform.tfstate '
                                      '-var-file=terraform.tfvars.json')
Example no. 6
    def list_leases(self, network_name):
        with utils.file_lock_context():
            net = self.libvirt_connection.networkLookupByName(network_name)
            # TODO: getting the information from the XML dump until the
            # dhcp-leases bug is fixed
            leases = net.DHCPLeases()
            hosts = self._get_hosts_from_network(net)
            return leases + [
                h for h in hosts
                if h["ipaddr"] not in [ls["ipaddr"] for ls in leases]
            ]
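The closing comprehension merges statically defined hosts with the active DHCP leases, skipping any host whose IP already appears in a lease. The inner lease-IP list is rebuilt for every host; an equivalent form with a precomputed set avoids that (same behavior, just a sketch):

            leased_ips = {lease["ipaddr"] for lease in leases}
            return leases + [h for h in hosts if h["ipaddr"] not in leased_ips]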
Example no. 7
    def get(self):
        logging.info("Taking asset from %s", self._assets_file)
        with utils.file_lock_context(self._lock_file):
            with open(self._assets_file) as _file:
                all_assets = json.load(_file)
            # Pop the first free asset and persist the remainder.
            asset = Munch.fromDict(all_assets.pop(0))
            with open(self._assets_file, "w") as _file:
                json.dump(all_assets, _file)
            self._took_assets.append(asset)
        logging.info("Taken asset: %s", asset)
        return asset
Example no. 8
def create_nodes(cluster_name, image_path, storage_path, master_count,
                 nodes_details, tf, machine_net):
    log.info("Creating tfvars")
    fill_tfvars(image_path=image_path,
                storage_path=storage_path,
                master_count=master_count,
                nodes_details=nodes_details,
                tf_folder=tf.working_dir,
                machine_net=machine_net)
    log.info('Start running terraform')
    with utils.file_lock_context():
        return tf.apply()
Example no. 9
def get_network_leases(network_name):
    warnings.warn(
        "get_network_leases is deprecated. "
        "Use LibvirtController.get_network_leases instead.",
        DeprecationWarning)
    with utils.file_lock_context():
        net = conn.networkLookupByName(network_name)
        # TODO: getting the information from the XML dump until the
        # dhcp-leases bug is fixed
        leases = net.DHCPLeases()
        hosts = _get_hosts_from_network(net)
        return leases + [
            h for h in hosts
            if h["ipaddr"] not in [ls["ipaddr"] for ls in leases]
        ]
Example no. 10
def create_nodes(tf):
    log.info('Start running terraform')
    with utils.file_lock_context():
        return tf.apply()
Example no. 11
    def release_all(self):
        with utils.file_lock_context(self._lock_file):
            assets_in_use = self._get_assets_in_use_from_assets_file()
            self._remove_taken_assets_from_all_assets_in_use(assets_in_use)
            self._dump_all_assets_in_use_to_assets_file(assets_in_use)
Example no. 12
def day2_nodes_flow(client, terraform_cluster_dir_prefix, tf_folder, cluster,
                    has_ipv_6, num_worker_nodes, api_vip_ip, api_vip_dnsname,
                    install_cluster_flag, day2_type_flag,
                    with_static_network_config, base_cluster_name):
    tf_network_name, total_num_nodes = get_network_num_nodes_from_tf(tf_folder)
    with utils.file_lock_context():
        utils.run_command(
            f'make _apply_terraform CLUSTER_NAME={terraform_cluster_dir_prefix}'
        )
    time.sleep(5)

    if day2_type_flag == "ocp":
        num_nodes_to_wait = total_num_nodes
        installed_status = consts.NodesStatus.INSTALLED
    else:
        num_nodes_to_wait = num_worker_nodes
        installed_status = consts.NodesStatus.DAY2_INSTALLED

    utils.wait_till_nodes_are_ready(nodes_count=num_nodes_to_wait,
                                    network_name=tf_network_name)

    waiting.wait(
        lambda: utils.are_libvirt_nodes_in_cluster_hosts(
            client, cluster.id, num_nodes_to_wait),
        timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
        sleep_seconds=10,
        waiting_for="Nodes to be registered in inventory service",
    )

    set_nodes_hostnames_if_needed(client, tf_folder,
                                  with_static_network_config, has_ipv_6,
                                  tf_network_name, cluster.id)

    utils.wait_till_all_hosts_are_in_status(
        client=client,
        cluster_id=cluster.id,
        nodes_count=num_worker_nodes,
        statuses=[consts.NodesStatus.KNOWN],
        interval=30,
    )

    if install_cluster_flag:
        log.info("Start installing all known nodes in the cluster %s",
                 cluster.id)
        kubeconfig = utils.get_kubeconfig_path(base_cluster_name)
        ocp_orig_ready_nodes = get_ocp_cluster_ready_nodes_num(kubeconfig)
        hosts = client.get_cluster_hosts(cluster.id)
        # Trigger installation on every host that is still in "known" state.
        for host in hosts:
            if host["status"] == 'known':
                client.install_day2_host(cluster.id, host['id'])

        log.info(
            "Start waiting until all nodes of cluster %s have been installed "
            "(reached the added-to-existing-cluster state)", cluster.id)
        utils.wait_till_all_hosts_are_in_status(
            client=client,
            cluster_id=cluster.id,
            nodes_count=num_nodes_to_wait,
            statuses=[installed_status],
            interval=30,
        )

        log.info(
            "Start waiting until the installed nodes have actually been added to the OCP cluster"
        )
        waiting.wait(lambda: wait_nodes_join_ocp_cluster(
            ocp_orig_ready_nodes, num_worker_nodes, day2_type_flag, kubeconfig
        ),
                     timeout_seconds=consts.NODES_REGISTERED_TIMEOUT,
                     sleep_seconds=30,
                     waiting_for="Day2 nodes to be added to OCP cluster",
                     expected_exceptions=Exception)
        log.info("%d worker nodes were successfully added to OCP cluster",
                 num_worker_nodes)
Example no. 13
def clean_virsh_resources(skip_list, resource_filter):
    with utils.file_lock_context():
        clean_domains(skip_list, resource_filter)
        clean_pools(skip_list, resource_filter)
        clean_networks(skip_list, resource_filter)
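The clean_domains, clean_pools, and clean_networks helpers are not shown on this page. Assuming they iterate libvirt objects over the same conn connection used in Example no. 9, clean_networks might look roughly like this (a sketch; the real skip/filter semantics are not visible in the source):

def clean_networks(skip_list, resource_filter):
    for net in conn.listAllNetworks():
        name = net.name()
        # Leave skipped networks and anything outside the filter alone.
        if name in skip_list or resource_filter not in name:
            continue
        if net.isActive():
            net.destroy()  # stop the running network
        net.undefine()     # drop its persistent definition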