Example 1
0
    def test_fetch_dependencies_unpack_parallel(self):
        """Fetch two chart tarballs over HTTPS in parallel and verify that
        each archive is unpacked into both the output and the save directory.

        Uses a multiprocessing pool of 4 workers (the default parallelism).
        Temporary directories are removed only on success, so artifacts
        remain available for inspection when assertions fail.
        """
        output_path = tempfile.mkdtemp()
        save_dir = tempfile.mkdtemp()
        # use default parallelism of 4 for test
        pool = multiprocessing.Pool(4)
        target_objs = [{
            "dependencies": [
                {
                    "type": "https",
                    "source":
                    "https://github.com/BurdenBear/kube-charts-mirror/raw/master/docs/nfs-client-provisioner-1.2.8.tgz",
                    "output_path": "nfs-client-provisioner",
                    "unpack": True,
                },
                {
                    "type": "https",
                    "source":
                    "https://github.com/BurdenBear/kube-charts-mirror/raw/master/docs/prometheus-pushgateway-1.2.13.tgz",
                    "output_path": "prometheus-pushgateway",
                    "unpack": True,
                },
            ]
        }]
        try:
            fetch_dependencies(output_path, target_objs, save_dir, False, pool)
            pool.close()
        except Exception:
            # kill in-flight workers before propagating the failure
            pool.terminate()
            # bare raise preserves the original traceback (unlike `raise e`)
            raise
        finally:
            # always wait for worker processes to exit
            pool.join()

        for obj in target_objs[0]["dependencies"]:
            # unpacked chart directory must exist in the compiled output...
            self.assertTrue(
                os.path.isdir(os.path.join(output_path, obj["output_path"])))
            # ...and in the save (cache) directory
            self.assertTrue(
                os.path.isdir(os.path.join(save_dir, obj["output_path"])))
        rmtree(output_path)
        rmtree(save_dir)
Example 2
0
def compile_targets(inventory_path, search_paths, output_path, parallel,
                    targets, labels, ref_controller, **kwargs):
    """
    Searches and loads target files, and runs compile_target() on a
    multiprocessing pool with parallel number of processes.
    kwargs are passed to compile_target()

    Compiled output is first written to a temporary directory and only
    copied into output_path/compiled on success, so a failed compile
    never clobbers a previous good run.

    Exits the process with status 1 on CompileError during target search
    or on any compile/validate failure; raises InventoryError on reclass
    inventory errors.
    """
    # temp_path will hold compiled items
    temp_path = tempfile.mkdtemp(suffix=".kapitan")

    updated_targets = targets
    try:
        updated_targets = search_targets(inventory_path, targets, labels)
    except CompileError as e:
        logger.error(e)
        sys.exit(1)

    # If --cache is set
    if kwargs.get("cache"):
        additional_cache_paths = kwargs.get("cache_paths")
        generate_inv_cache_hashes(inventory_path, targets,
                                  additional_cache_paths)

        # with no explicit targets, compile only what changed since last run
        if not targets:
            updated_targets = changed_targets(inventory_path, output_path)
            logger.debug("Changed targets since last compilation: %s",
                         updated_targets)
            if len(updated_targets) == 0:
                logger.info("No changes since last compilation.")
                return

    pool = multiprocessing.Pool(parallel)

    try:
        target_objs = load_target_inventory(inventory_path, updated_targets)

        # append "compiled" to output_path so we can safely overwrite it
        compile_path = os.path.join(output_path, "compiled")
        worker = partial(
            compile_target,
            search_paths=search_paths,
            compile_path=temp_path,
            ref_controller=ref_controller,
            **kwargs,
        )

        if not target_objs:
            raise CompileError("Error: no targets found")

        if kwargs.get("fetch_dependencies", False):
            fetch_dependencies(target_objs, pool)

        # compile_target() returns None on success
        # so p is only not None when raising an exception
        [p.get() for p in pool.imap_unordered(worker, target_objs) if p]

        os.makedirs(compile_path, exist_ok=True)

        # if '-t' is set on compile or only a few changed, only override selected targets
        if updated_targets:
            for target in updated_targets:
                compile_path_target = os.path.join(compile_path, target)
                temp_path_target = os.path.join(temp_path, target)

                os.makedirs(compile_path_target, exist_ok=True)

                shutil.rmtree(compile_path_target)
                shutil.copytree(temp_path_target, compile_path_target)
                logger.debug("Copied %s into %s", temp_path_target,
                             compile_path_target)
        # otherwise override all targets
        else:
            shutil.rmtree(compile_path)
            shutil.copytree(temp_path, compile_path)
            logger.debug("Copied %s into %s", temp_path, compile_path)

        # validate the compiled outputs
        if kwargs.get("validate", False):
            validate_map = create_validate_mapping(target_objs, compile_path)
            worker = partial(schema_validate_kubernetes_output,
                             cache_dir=kwargs.get("schemas_path", "./schemas"))
            [
                p.get()
                for p in pool.imap_unordered(worker, validate_map.items()) if p
            ]

        # Save inventory and folders cache
        save_inv_cache(compile_path, targets)
        pool.close()

    except ReclassException as e:
        if isinstance(e, NotFoundError):
            logger.error("Inventory reclass error: inventory not found")
        else:
            logger.error("Inventory reclass error: %s", e.message)
        # terminate the pool before raising: the finally-clause calls
        # pool.join(), which raises ValueError on a still-running pool
        # and would mask this InventoryError
        pool.terminate()
        raise InventoryError(e.message)
    except Exception as e:
        # if compile worker fails, terminate immediately
        pool.terminate()
        logger.debug("Compile pool terminated")
        # only print traceback for errors we don't know about
        if not isinstance(e, KapitanError):
            logger.exception("Unknown (Non-Kapitan) Error occurred")

        logger.error("\n")
        logger.error(e)
        sys.exit(1)
    finally:
        # always wait for other worker processes to terminate
        pool.join()
        shutil.rmtree(temp_path)
        logger.debug("Removed %s", temp_path)