Example #1
def test_execute_celery_docker():
    docker_image = test_project_docker_image()
    docker_config = {
        "image": docker_image,
        "env_vars": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],
    }

    if IS_BUILDKITE:
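        # On Buildkite, exchange an ECR authorization token for registry credentials
        # so the celery-docker executor can pull the test image.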
        ecr_client = boto3.client("ecr", region_name="us-west-1")
        token = ecr_client.get_authorization_token()
        username, password = (
            base64.b64decode(token["authorizationData"][0]["authorizationToken"])
            .decode()
            .split(":")
        )
        registry = token["authorizationData"][0]["proxyEndpoint"]

        docker_config["registry"] = {
            "url": registry,
            "username": username,
            "password": password,
        }

    else:
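        # Locally, reuse an already-built image if it exists; otherwise build and tag it.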
        try:
            client = docker.from_env()
            client.images.get(docker_image)
            print(  # pylint: disable=print-call
                "Found existing image tagged {image}, skipping image build. To rebuild, first run: "
                "docker rmi {image}".format(image=docker_image))
        except docker.errors.ImageNotFound:
            build_and_tag_test_image(docker_image)

    with seven.TemporaryDirectory() as temp_dir:
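        # Merge the base environment YAMLs with the celery-docker executor config;
        # task_always_eager makes Celery run the tasks in-process for this test.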

        run_config = merge_dicts(
            merge_yamls([
                os.path.join(test_project_environments_path(), "env.yaml"),
                os.path.join(test_project_environments_path(), "env_s3.yaml"),
            ]),
            {
                "execution": {
                    "celery-docker": {
                        "config": {
                            "docker": docker_config,
                            "config_source": {
                                "task_always_eager": True
                            },
                        }
                    }
                },
            },
        )

        result = execute_pipeline(
            get_test_project_recon_pipeline("docker_celery_pipeline"),
            run_config=run_config,
            instance=DagsterInstance.local_temp(temp_dir),
        )
        assert result.success
Example #2
def test_execute_celery_docker():
    docker_image = test_project_docker_image()
    docker_config = {
        'image': docker_image,
        'env_vars': ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'],
    }

    if IS_BUILDKITE:
        ecr_client = boto3.client('ecr', region_name='us-west-1')
        token = ecr_client.get_authorization_token()
        username, password = (
            base64.b64decode(token['authorizationData'][0]['authorizationToken'])
            .decode()
            .split(':')
        )
        registry = token['authorizationData'][0]['proxyEndpoint']

        docker_config['registry'] = {
            'url': registry,
            'username': username,
            'password': password,
        }

    else:
        try:
            client = docker.from_env()
            client.images.get(docker_image)
            print(  # pylint: disable=print-call
                'Found existing image tagged {image}, skipping image build. To rebuild, first run: '
                'docker rmi {image}'.format(image=docker_image))
        except docker.errors.ImageNotFound:
            build_and_tag_test_image(docker_image)

    with seven.TemporaryDirectory() as temp_dir:

        run_config = merge_dicts(
            merge_yamls([
                os.path.join(test_project_environments_path(), 'env.yaml'),
                os.path.join(test_project_environments_path(), 'env_s3.yaml'),
            ]),
            {
                'execution': {
                    'celery-docker': {
                        'config': {
                            'docker': docker_config,
                            'config_source': {
                                'task_always_eager': True
                            },
                        }
                    }
                },
            },
        )

        result = execute_pipeline(
            get_test_project_recon_pipeline('docker_celery_pipeline'),
            run_config=run_config,
            instance=DagsterInstance.local_temp(temp_dir),
        )
        assert result.success
Example #3
    def _cluster_provider(request):
        from .kind import kind_cluster_exists, kind_cluster, kind_load_images

        if IS_BUILDKITE:
            print("Installing ECR credentials...")
            check_output(
                "aws ecr get-login --no-include-email --region us-west-2 | sh",
                shell=True)

        provider = request.config.getoption("--cluster-provider")

        # Use a kind cluster
        if provider == "kind":
            cluster_name = request.config.getoption("--kind-cluster")

            # The cluster will be deleted afterwards unless this is set.
            # This lets users reuse an existing cluster in local tests by running
            # `pytest --kind-cluster my-cluster --no-cleanup`, which avoids the per-test-run
            # overhead of cluster setup and teardown.
            should_cleanup = True if IS_BUILDKITE else not request.config.getoption(
                "--no-cleanup")

            existing_cluster = kind_cluster_exists(cluster_name)

            with kind_cluster(cluster_name,
                              should_cleanup=should_cleanup) as cluster_config:
                if not IS_BUILDKITE and not existing_cluster:
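                    # Build the test image locally if it is not already present, then
                    # load it into the kind cluster so pods can use it without a remote registry.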
                    docker_image = get_test_project_docker_image()
                    try:
                        client = docker.from_env()
                        client.images.get(docker_image)
                        print(  # pylint: disable=print-call
                            "Found existing image tagged {image}, skipping image build. To rebuild, first run: "
                            "docker rmi {image}".format(image=docker_image))
                    except docker.errors.ImageNotFound:
                        build_and_tag_test_image(docker_image)
                    kind_load_images(
                        cluster_name=cluster_config.name,
                        local_dagster_test_image=docker_image,
                        additional_images=additional_kind_images,
                    )
                yield cluster_config

        # Use cluster from kubeconfig
        elif provider == "kubeconfig":
            kubeconfig_file = os.getenv(
                "KUBECONFIG", os.path.expandvars("${HOME}/.kube/config"))
            kubernetes.config.load_kube_config(config_file=kubeconfig_file)
            yield ClusterConfig(name="from_system_kubeconfig",
                                kubeconfig_file=kubeconfig_file)

        else:
            raise Exception("unknown cluster provider %s" % provider)
Example #4
def dagster_docker_image():
    docker_image = test_project_docker_image()

    if not IS_BUILDKITE:
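        # Outside Buildkite, reuse the image if it is already tagged locally; otherwise build it.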
        try:
            client = docker.from_env()
            client.images.get(docker_image)
            print(  # pylint: disable=print-call
                "Found existing image tagged {image}, skipping image build. To rebuild, first run: "
                "docker rmi {image}".format(image=docker_image))
        except docker.errors.ImageNotFound:
            build_and_tag_test_image(docker_image)

    return docker_image
Example #5
    def _cluster_provider(request):
        from .kind import kind_cluster_exists, kind_cluster, kind_load_images

        if IS_BUILDKITE:
            print('Installing ECR credentials...')
            check_output(
                'aws ecr get-login --no-include-email --region us-west-1 | sh',
                shell=True)

        provider = request.config.getoption('--cluster-provider')

        # Use a kind cluster
        if provider == 'kind':
            cluster_name = request.config.getoption('--kind-cluster')

            # The cluster will be deleted afterwards unless this is set.
            # This lets users reuse an existing cluster in local tests by running
            # `pytest --kind-cluster my-cluster --no-cleanup`, which avoids the per-test-run
            # overhead of cluster setup and teardown.
            should_cleanup = True if IS_BUILDKITE else not request.config.getoption(
                '--no-cleanup')

            existing_cluster = kind_cluster_exists(cluster_name)

            with kind_cluster(cluster_name,
                              should_cleanup=should_cleanup) as cluster_config:
                if not IS_BUILDKITE and not existing_cluster:
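                    # Build the test project image and load it into the kind cluster so it
                    # is available to pods.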
                    docker_image = test_project_docker_image()
                    build_and_tag_test_image(docker_image)
                    kind_load_images(
                        cluster_name=cluster_config.name,
                        local_dagster_test_image=docker_image,
                        additional_images=additional_kind_images,
                    )
                yield cluster_config

        # Use cluster from kubeconfig
        elif provider == 'kubeconfig':
            kubeconfig_file = os.getenv(
                'KUBECONFIG', os.path.expandvars('${HOME}/.kube/config'))
            kubernetes.config.load_kube_config(config_file=kubeconfig_file)
            yield ClusterConfig(name='from_system_kubeconfig',
                                kubeconfig_file=kubeconfig_file)

        else:
            raise Exception('unknown cluster provider %s' % provider)
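
Both kind-based examples call a kind_load_images helper whose implementation is not shown. As a minimal sketch, assuming the kind CLI is installed, such a helper could shell out to `kind load docker-image`; the function name and signature below are illustrative, not the project's:

import subprocess

def load_image_into_kind(cluster_name, image):
    # Illustrative stand-in for kind_load_images: `kind load docker-image` copies a
    # locally built image onto the kind cluster's nodes, so pods can run it without
    # pulling from a remote registry.
    subprocess.check_call(["kind", "load", "docker-image", image, "--name", cluster_name])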