Example #1
def launch_scheduled_execution(output_file, schedule_name, **kwargs):
    with ipc_write_stream(output_file) as stream:
        recon_repo = get_reconstructable_repository_from_origin_kwargs(kwargs)

        instance = DagsterInstance.get()

        repo_def = recon_repo.get_definition()

        if not repo_def.has_schedule_def(schedule_name):
            raise DagsterInvariantViolationError(
                'Schedule named "{sched}" not found in repository "{repo}"'.format(
                    sched=schedule_name, repo=repo_def.name
                )
            )

        schedule = recon_repo.get_reconstructable_schedule(schedule_name)
        schedule_def = schedule.get_definition()

        with _schedule_tick_state(
            instance,
            ScheduleTickData(
                schedule_origin_id=schedule.get_origin_id(),
                schedule_name=schedule_def.name,
                cron_schedule=schedule_def.cron_schedule,
                timestamp=time.time(),
                status=ScheduleTickStatus.STARTED,
            ),
        ) as tick:

            pipeline = recon_repo.get_reconstructable_pipeline(
                schedule_def.pipeline_name
            ).subset_for_execution(schedule_def.solid_selection)

            _launch_scheduled_execution(instance, schedule_def, pipeline, tick, stream)
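
The command bodies above only ever write to output_file; the process that launched them is expected to replay that file with ipc_read_event_stream. A minimal consumer sketch under that assumption (the function name and the printing are illustrative, not part of the Dagster API, and the import path is typical of older releases and may vary):

# assumed import path; may differ between Dagster releases
from dagster.serdes.ipc import IPCErrorMessage, ipc_read_event_stream


def read_scheduled_execution_events(output_file):
    # Replay everything the subprocess wrote to the stream, separating
    # IPCErrorMessage entries (load or execution failures) from ordinary events.
    events = []
    for message in ipc_read_event_stream(output_file):
        if isinstance(message, IPCErrorMessage):
            print(message.serializable_error_info.message)
        else:
            events.append(message)
    return events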
Example #2
def launch_scheduled_execution(output_file, schedule_name, **kwargs):
    with ipc_write_stream(output_file) as stream:
        instance = DagsterInstance.get()

        recon_repo = get_reconstructable_repository_from_origin_kwargs(kwargs)
        schedule = recon_repo.get_reconstructable_schedule(schedule_name)

        # open the tick scope before we call get_definition to make sure
        # load errors are stored in DB
        with _schedule_tick_state(
                instance,
                ScheduleTickData(
                    schedule_origin_id=schedule.get_origin_id(),
                    schedule_name=schedule_name,
                    timestamp=time.time(),
                    cron_schedule=None,  # not yet loaded
                    status=ScheduleTickStatus.STARTED,
                ),
        ) as tick:

            schedule_def = schedule.get_definition()

            tick.update_with_status(
                status=ScheduleTickStatus.STARTED,
                cron_schedule=schedule_def.cron_schedule,
            )

            pipeline = recon_repo.get_reconstructable_pipeline(
                schedule_def.pipeline_name).subset_for_execution(
                    schedule_def.solid_selection)

            _launch_scheduled_execution(instance, schedule_def, pipeline, tick,
                                        stream)
Example #3
def execute_pipeline_command(output_file, solid_subset, environment_dict, mode,
                             **kwargs):
    '''
    This command might want to take a runId instead of current arguments

    1. Should take optional flags to determine where to store the log output
    2. Investigate python logging library to see what we can do there
    '''

    with ipc_write_stream(output_file) as stream:
        recon_pipeline = recon_pipeline_for_cli_args(kwargs)
        definition = recon_pipeline.get_definition()

        if solid_subset:
            definition = definition.subset_for_execution(
                solid_subset.split(","))

        # This can raise a ValueError, but this is caught by the broad-except
        # and the exception is serialized as a SerializableErrorInfo
        environment_dict = json.loads(environment_dict)

        instance = DagsterInstance.get()

        for event in execute_pipeline_iterator(
                definition,
                environment_dict=environment_dict,
                mode=mode,
                instance=instance,
        ):
            stream.send(event)
Example #4
def _execute_run_command_body(
    output_file,
    recon_repo,
    pipeline_run_id,
    instance_ref_json,
):
    with ipc_write_stream(output_file) as stream:
        instance = _get_instance(stream, instance_ref_json)
        if not instance:
            return

        pipeline_run = instance.get_run_by_id(pipeline_run_id)

        pid = os.getpid()
        instance.report_engine_event(
            'Started process for pipeline (pid: {pid}).'.format(pid=pid),
            pipeline_run,
            EngineEventData.in_process(pid,
                                       marker_end='cli_api_subprocess_init'),
        )

        recon_pipeline = _recon_pipeline(stream, recon_repo, pipeline_run)

        # Perform setup so that termination of the execution will unwind and report to the
        # instance correctly
        setup_interrupt_support()

        try:
            for event in execute_run_iterator(recon_pipeline, pipeline_run,
                                              instance):
                stream.send(event)
        except DagsterSubprocessError as err:
            if not all([
                    err_info.cls_name == 'KeyboardInterrupt'
                    for err_info in err.subprocess_error_infos
            ]):
                instance.report_engine_event(
                    'An exception was thrown during execution that is likely a framework error, '
                    'rather than an error in user code.',
                    pipeline_run,
                    EngineEventData.engine_error(
                        serializable_error_info_from_exc_info(sys.exc_info())),
                )
        except Exception:  # pylint: disable=broad-except
            instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
        finally:
            instance.report_engine_event(
                'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
                pipeline_run,
            )
Example #5
def test_write_empty_stream():

    with safe_tempfile_path() as f:
        with ipc_write_stream(f) as _:
            pass

        messages = []
        for message in ipc_read_event_stream(f):
            messages.append(message)

        assert len(messages) == 0
Example #6
def execute_run_command(input_file, output_file):
    args = check.inst(read_unary_input(input_file), ExecuteRunArgs)
    recon_pipeline = recon_pipeline_from_origin(args.pipeline_origin)
    with DagsterInstance.from_ref(args.instance_ref) as instance:
        with ipc_write_stream(output_file) as ipc_stream:

            def send_to_stream(event):
                ipc_stream.send(event)

            return _execute_run_command_body(recon_pipeline,
                                             args.pipeline_run_id, instance,
                                             send_to_stream)
Example #7
def _execute_run_command_body(output_file, recon_pipeline, pipeline_run_id,
                              instance_ref):
    with ipc_write_stream(output_file) as stream:

        # we need to send back the fact that we have loaded the args so the calling
        # process knows it is safe to clean up the temp input file
        stream.send(ExecuteRunArgsLoadComplete())

        instance = DagsterInstance.from_ref(instance_ref)
        pipeline_run = instance.get_run_by_id(pipeline_run_id)

        pid = os.getpid()
        instance.report_engine_event(
            'Started process for pipeline (pid: {pid}).'.format(pid=pid),
            pipeline_run,
            EngineEventData.in_process(pid,
                                       marker_end='cli_api_subprocess_init'),
        )

        # Perform setup so that termination of the execution will unwind and report to the
        # instance correctly
        setup_interrupt_support()

        try:
            for event in execute_run_iterator(recon_pipeline, pipeline_run,
                                              instance):
                stream.send(event)
        except DagsterSubprocessError as err:
            if not all([
                    err_info.cls_name == 'KeyboardInterrupt'
                    for err_info in err.subprocess_error_infos
            ]):
                instance.report_engine_event(
                    'An exception was thrown during execution that is likely a framework error, '
                    'rather than an error in user code.',
                    pipeline_run,
                    EngineEventData.engine_error(
                        serializable_error_info_from_exc_info(sys.exc_info())),
                )
        except Exception:  # pylint: disable=broad-except
            instance.report_engine_event(
                'An exception was thrown during execution that is likely a framework error, '
                'rather than an error in user code.',
                pipeline_run,
                EngineEventData.engine_error(
                    serializable_error_info_from_exc_info(sys.exc_info())),
            )
        finally:
            instance.report_engine_event(
                'Process for pipeline exited (pid: {pid}).'.format(pid=pid),
                pipeline_run,
            )
Example #8
def test_write_error_stream():
    with safe_tempfile_path() as filename:
        with ipc_write_stream(filename) as _:
            raise Exception("uh oh")

        messages = []
        for message in ipc_read_event_stream(filename):
            messages.append(message)

        assert len(messages) == 1
        message = messages[0]

        assert isinstance(message, IPCErrorMessage)
        assert "uh oh" in message.serializable_error_info.message
Example #9
def test_write_error_stream():
    with tempfile.NamedTemporaryFile() as f:
        with ipc_write_stream(f.name) as _:
            raise Exception('uh oh')

        messages = []
        for message in ipc_read_event_stream(f.name):
            messages.append(message)

        assert len(messages) == 1
        message = messages[0]

        assert isinstance(message, IPCErrorMessage)
        assert 'uh oh' in message.serializable_error_info.message
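
The two tests above are shown without imports; a self-contained variant of the same error-capture round trip might look like the following, where the dagster.serdes.ipc import path is an assumption based on older Dagster layouts:

import tempfile

# assumed import path; may differ between Dagster releases
from dagster.serdes.ipc import IPCErrorMessage, ipc_read_event_stream, ipc_write_stream


def demo_error_round_trip():
    with tempfile.NamedTemporaryFile() as f:
        # Per the tests above, an exception raised inside the writer block is
        # serialized to the file as an IPCErrorMessage rather than propagating.
        with ipc_write_stream(f.name) as _:
            raise Exception('round trip demo')

        messages = list(ipc_read_event_stream(f.name))
        assert len(messages) == 1
        assert isinstance(messages[0], IPCErrorMessage)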
Example #10
    def serve(self):
        # Unfortunately it looks like ports bind late (here) and so this can fail with an error
        # from C++ like:
        #
        #    E0625 08:46:56.180112000 4697443776 server_chttp2.cc:40]
        #    {"created":"@1593089216.180085000","description":"Only 1 addresses added out of total
        #    2 resolved","file":"src/core/ext/transport/chttp2/server/chttp2_server.cc",
        #    "file_line":406,"referenced_errors":[{"created":"@1593089216.180083000","description":
        #    "Unable to configure socket","fd":6,"file":
        #    "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":217,
        #    "referenced_errors":[{"created":"@1593089216.180079000",
        #    "description":"Address already in use","errno":48,"file":
        #    "src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":190,"os_error":
        #    "Address already in use","syscall":"bind"}]}]}
        #
        # This is printed to stdout and there is no return value from server.start or exception
        # raised in Python that we can use to handle this. The standard recipes for hijacking C
        # stdout (so we could inspect this output and respond accordingly), e.g.
        # https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/, don't seem
        # to work (at least on Mac OS X) against grpc, and in any case would involve a huge
        # cross-version and cross-platform maintenance burden. We have an issue open against grpc,
        # https://github.com/grpc/grpc/issues/23315, and our own tracking issue at

        self.server.start()

        # Note: currently this is hardcoded as serving, since both services are cohosted
        # pylint: disable=no-member
        self._health_servicer.set("DagsterApi",
                                  health_pb2.HealthCheckResponse.SERVING)

        if self._ipc_output_file:
            with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                ipc_stream.send(GrpcServerStartedEvent())

        server_termination_thread = threading.Thread(
            target=server_termination_target,
            args=[self._server_termination_event, self.server],
            name="grpc-server-termination",
        )

        server_termination_thread.daemon = True

        server_termination_thread.start()

        self.server.wait_for_termination()

        server_termination_thread.join()

        self._api_servicer.cleanup()
Example #11
def execute_run_command(input_file, output_file):
    # Ensure that interrupts from the run launcher only happen inside user code or specially
    # designated checkpoints
    with delay_interrupts():
        args = check.inst(read_unary_input(input_file), ExecuteRunArgs)
        recon_pipeline = recon_pipeline_from_origin(args.pipeline_origin)
        with DagsterInstance.from_ref(args.instance_ref) as instance:
            with ipc_write_stream(output_file) as ipc_stream:

                def send_to_stream(event):
                    ipc_stream.send(event)

                return _execute_run_command_body(recon_pipeline,
                                                 args.pipeline_run_id,
                                                 instance, send_to_stream)
Example #12
def launch_scheduled_execution(output_file, schedule_name, **kwargs):
    with ipc_write_stream(output_file) as stream:
        with DagsterInstance.get() as instance:
            repository_origin = get_repository_origin_from_kwargs(kwargs)
            job_origin = repository_origin.get_job_origin(schedule_name)

            # open the tick scope before we load any external artifacts so that
            # load errors are stored in DB
            with _schedule_tick_state(
                    instance,
                    stream,
                    JobTickData(
                        job_origin_id=job_origin.get_id(),
                        job_name=schedule_name,
                        job_type=JobType.SCHEDULE,
                        status=JobTickStatus.STARTED,
                        timestamp=time.time(),
                    ),
            ) as tick_context:
                with get_repository_location_from_kwargs(
                        kwargs) as repo_location:
                    repo_dict = repo_location.get_repositories()
                    check.invariant(
                        repo_dict and len(repo_dict) == 1,
                        "Passed in arguments should reference exactly one repository, instead there are {num_repos}"
                        .format(num_repos=len(repo_dict)),
                    )
                    external_repo = next(iter(repo_dict.values()))
                    check.invariant(
                        schedule_name in [
                            schedule.name for schedule in
                            external_repo.get_external_schedules()
                        ],
                        "Could not find schedule named {schedule_name}".format(
                            schedule_name=schedule_name),
                    )
                    external_schedule = external_repo.get_external_schedule(
                        schedule_name)
                    tick_context.update_with_status(
                        status=JobTickStatus.STARTED)
                    _launch_scheduled_execution(
                        instance,
                        repo_location,
                        external_repo,
                        external_schedule,
                        tick_context,
                        stream,
                    )
Example #13
def test_write_error_with_custom_message():
    with tempfile.NamedTemporaryFile() as f:
        with ipc_write_stream(f.name) as stream:
            try:
                raise Exception('uh oh')
            except:  # pylint: disable=bare-except
                stream.send_error(sys.exc_info(), message='custom')

        messages = []
        for message in ipc_read_event_stream(f.name):
            messages.append(message)

        assert len(messages) == 1
        ipc_message = messages[0]

        assert isinstance(ipc_message, IPCErrorMessage)
        assert 'uh oh' in ipc_message.serializable_error_info.message
        assert ipc_message.message == 'custom'
Example #14
def test_write_error_with_custom_message():
    with safe_tempfile_path() as filename:
        with ipc_write_stream(filename) as stream:
            try:
                raise Exception("uh oh")
            except:
                stream.send_error(sys.exc_info(), message="custom")

        messages = []
        for message in ipc_read_event_stream(filename):
            messages.append(message)

        assert len(messages) == 1
        ipc_message = messages[0]

        assert isinstance(ipc_message, IPCErrorMessage)
        assert "uh oh" in ipc_message.serializable_error_info.message
        assert ipc_message.message == "custom"
Example #15
def launch_scheduled_execution(output_file, schedule_name, **kwargs):
    with ipc_write_stream(output_file) as stream:
        instance = DagsterInstance.get()
        repository_origin = get_repository_origin_from_kwargs(kwargs)
        schedule_origin = repository_origin.get_schedule_origin(schedule_name)

        # open the tick scope before we load any external artifacts so that
        # load errors are stored in DB
        with _schedule_tick_state(
                instance,
                stream,
                ScheduleTickData(
                    schedule_origin_id=schedule_origin.get_id(),
                    schedule_name=schedule_name,
                    timestamp=time.time(),
                    cron_schedule=None,  # not yet loaded
                    status=ScheduleTickStatus.STARTED,
                ),
        ) as tick:
            repo_location = get_repository_location_from_kwargs(
                kwargs, instance)
            repo_dict = repo_location.get_repositories()
            check.invariant(
                repo_dict and len(repo_dict) == 1,
                'Passed in arguments should reference exactly one repository, instead there are {num_repos}'
                .format(num_repos=len(repo_dict)),
            )
            external_repo = next(iter(repo_dict.values()))
            check.invariant(
                schedule_name in [
                    schedule.name
                    for schedule in external_repo.get_external_schedules()
                ],
                'Could not find schedule named {schedule_name}'.format(
                    schedule_name=schedule_name),
            )
            external_schedule = external_repo.get_external_schedule(
                schedule_name)
            tick.update_with_status(
                status=ScheduleTickStatus.STARTED,
                cron_schedule=external_schedule.cron_schedule,
            )
            _launch_scheduled_execution(instance, repo_location, external_repo,
                                        external_schedule, tick, stream)
Example #16
def test_write_read_stream():
    @whitelist_for_serdes
    class TestMessage(namedtuple("_TestMessage", "message")):
        def __new__(cls, message):
            return super(TestMessage, cls).__new__(cls, message)

    with safe_tempfile_path() as f:
        message_1 = TestMessage(message="hello")
        message_2 = TestMessage(message="world")

        with ipc_write_stream(f) as stream:
            stream.send(message_1)
            stream.send(message_2)

        messages = []
        for message in ipc_read_event_stream(f):
            messages.append(message)

        assert messages[0] == message_1
        assert messages[1] == message_2
Example #17
def execute_pipeline_command(output_file, solid_subset, environment_dict,
                             instance_ref, mode, **kwargs):
    '''
    This command might want to take a runId instead of current arguments

    1. Should take optional flags to determine where to store the log output
    2. Investigate python logging library to see what we can do there
    '''

    with ipc_write_stream(output_file) as stream:
        recon_pipeline = recon_pipeline_for_cli_args(kwargs)
        definition = recon_pipeline.get_definition()

        if solid_subset:
            definition = definition.subset_for_execution(
                solid_subset.split(","))

        # This can raise a ValueError, but this is caught by the broad-except
        # and the exception is serialized as a SerializableErrorInfo
        environment_dict = json.loads(environment_dict)

        try:
            instance = DagsterInstance.from_ref(
                deserialize_json_to_dagster_namedtuple(instance_ref))
        except:  # pylint: disable=bare-except
            stream.send_error(
                sys.exc_info(),
                message='Could not deserialize {json_string}'.format(
                    json_string=instance_ref),
            )
            return

        for event in execute_pipeline_iterator(
                definition,
                environment_dict=environment_dict,
                mode=mode,
                instance=instance,
        ):
            stream.send(event)
Example #18
    def __init__(
        self,
        host="localhost",
        port=None,
        socket=None,
        max_workers=None,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        ipc_output_file=None,
        fixed_server_id=None,
        entry_point=None,
        container_context=None,
    ):
        check.opt_str_param(host, "host")
        check.opt_int_param(port, "port")
        check.opt_str_param(socket, "socket")
        check.opt_int_param(max_workers, "max_workers")
        check.opt_inst_param(loadable_target_origin, "loadable_target_origin",
                             LoadableTargetOrigin)
        check.invariant(
            port is not None if seven.IS_WINDOWS else True,
            "You must pass a valid `port` on Windows: `socket` not supported.",
        )
        check.invariant(
            (port or socket) and not (port and socket),
            "You must pass one and only one of `port` or `socket`.",
        )
        check.invariant(
            host is not None if port else True,
            "Must provide a host when serving on a port",
        )
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        self._ipc_output_file = check.opt_str_param(ipc_output_file,
                                                    "ipc_output_file")
        check.opt_str_param(fixed_server_id, "fixed_server_id")

        check.invariant(heartbeat_timeout > 0,
                        "heartbeat_timeout must be greater than 0")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )

        self.server = grpc.server(
            ThreadPoolExecutor(max_workers=max_workers),
            compression=grpc.Compression.Gzip,
            options=[
                ("grpc.max_send_message_length", max_send_bytes()),
                ("grpc.max_receive_message_length", max_rx_bytes()),
            ],
        )
        self._server_termination_event = threading.Event()

        try:
            self._api_servicer = DagsterApiServer(
                server_termination_event=self._server_termination_event,
                loadable_target_origin=loadable_target_origin,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                lazy_load_user_code=lazy_load_user_code,
                fixed_server_id=fixed_server_id,
                entry_point=entry_point,
                container_context=container_context,
            )
        except Exception:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(
                        GrpcServerLoadErrorEvent(
                            error_info=serializable_error_info_from_exc_info(
                                sys.exc_info())))
            raise

        # Create a health check servicer
        self._health_servicer = health.HealthServicer()
        health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer,
                                                     self.server)

        add_DagsterApiServicer_to_server(self._api_servicer, self.server)

        if port:
            server_address = host + ":" + str(port)
        else:
            server_address = "unix:" + os.path.abspath(socket)

        # grpc.Server.add_insecure_port returns:
        # - 0 on failure
        # - port number when a port is successfully bound
        # - 1 when a UDS is successfully bound
        res = self.server.add_insecure_port(server_address)
        if socket and res != 1:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(socket)
        if port and res != port:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(port)
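
Examples #10 and #18 together sketch both halves of the gRPC startup handshake: the server process writes one of GrpcServerStartedEvent, GrpcServerFailedToBindEvent, or GrpcServerLoadErrorEvent to its ipc output file, each inside a short-lived ipc_write_stream block. A simplified, hypothetical parent-side check built on that (imports omitted as in the surrounding snippets; Dagster's real client-side wait logic may differ):

def check_grpc_server_startup(ipc_output_file):
    # Replay the startup event the server process wrote; True means the server
    # bound its port/socket and loaded user code, False means startup failed.
    for message in ipc_read_event_stream(ipc_output_file):
        if isinstance(message, GrpcServerStartedEvent):
            return True
        if isinstance(message, (GrpcServerFailedToBindEvent, GrpcServerLoadErrorEvent)):
            return False
    return False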
Example #19
def launch_scheduled_execution(output_file, schedule_name,
                               override_system_timezone, **kwargs):
    with (mock_system_timezone(override_system_timezone)
          if override_system_timezone else nullcontext()):
        with ipc_write_stream(output_file) as stream:
            with DagsterInstance.get() as instance:
                repository_origin = get_repository_origin_from_kwargs(kwargs)
                job_origin = repository_origin.get_job_origin(schedule_name)

                # open the tick scope before we load any external artifacts so that
                # load errors are stored in DB
                with _schedule_tick_context(
                        instance,
                        stream,
                        JobTickData(
                            job_origin_id=job_origin.get_id(),
                            job_name=schedule_name,
                            job_type=JobType.SCHEDULE,
                            status=JobTickStatus.STARTED,
                            timestamp=time.time(),
                        ),
                ) as tick_context:
                    with get_repository_location_from_kwargs(
                            kwargs) as repo_location:
                        repo_dict = repo_location.get_repositories()
                        check.invariant(
                            repo_dict and len(repo_dict) == 1,
                            "Passed in arguments should reference exactly one repository, instead there are {num_repos}"
                            .format(num_repos=len(repo_dict)),
                        )
                        external_repo = next(iter(repo_dict.values()))
                        if schedule_name not in [
                                schedule.name for schedule in
                                external_repo.get_external_schedules()
                        ]:
                            raise DagsterInvariantViolationError(
                                "Could not find schedule named {schedule_name}"
                                .format(schedule_name=schedule_name), )

                        external_schedule = external_repo.get_external_schedule(
                            schedule_name)

                        # Validate that either the schedule has no timezone or it matches
                        # the system timezone
                        schedule_timezone = external_schedule.execution_timezone
                        if schedule_timezone:
                            system_timezone = pendulum.now().timezone.name

                            if system_timezone != external_schedule.execution_timezone:
                                raise DagsterInvariantViolationError(
                                    "Schedule {schedule_name} is set to execute in {schedule_timezone}, "
                                    "but this scheduler can only run in the system timezone, "
                                    "{system_timezone}. Use DagsterDaemonScheduler if you want to be able "
                                    "to execute schedules in arbitrary timezones."
                                    .format(
                                        schedule_name=external_schedule.name,
                                        schedule_timezone=schedule_timezone,
                                        system_timezone=system_timezone,
                                    ), )

                        _launch_scheduled_executions(instance, repo_location,
                                                     external_repo,
                                                     external_schedule,
                                                     tick_context)
Example #20
    def __init__(
        self,
        host="localhost",
        port=None,
        socket=None,
        max_workers=1,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        ipc_output_file=None,
        fixed_server_id=None,
    ):
        check.opt_str_param(host, "host")
        check.opt_int_param(port, "port")
        check.opt_str_param(socket, "socket")
        check.int_param(max_workers, "max_workers")
        check.opt_inst_param(loadable_target_origin, "loadable_target_origin", LoadableTargetOrigin)
        check.invariant(
            port is not None if seven.IS_WINDOWS else True,
            "You must pass a valid `port` on Windows: `socket` not supported.",
        )
        check.invariant(
            (port or socket) and not (port and socket),
            "You must pass one and only one of `port` or `socket`.",
        )
        check.invariant(
            host is not None if port else True, "Must provide a host when serving on a port",
        )
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        self._ipc_output_file = check.opt_str_param(ipc_output_file, "ipc_output_file")
        check.opt_str_param(fixed_server_id, "fixed_server_id")

        check.invariant(heartbeat_timeout > 0, "heartbeat_timeout must be greater than 0")
        check.invariant(
            max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 if heartbeat is True",
        )

        self.server = grpc.server(ThreadPoolExecutor(max_workers=max_workers))
        self._server_termination_event = threading.Event()

        self._api_servicer = DagsterApiServer(
            server_termination_event=self._server_termination_event,
            loadable_target_origin=loadable_target_origin,
            heartbeat=heartbeat,
            heartbeat_timeout=heartbeat_timeout,
            lazy_load_user_code=lazy_load_user_code,
            fixed_server_id=fixed_server_id,
        )

        # Create a health check servicer
        self._health_servicer = health.HealthServicer()
        health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer, self.server)

        add_DagsterApiServicer_to_server(self._api_servicer, self.server)

        if port:
            server_address = host + ":" + str(port)
        else:
            server_address = "unix:" + os.path.abspath(socket)

        # grpc.Server.add_insecure_port returns:
        # - 0 on failure
        # - port number when a port is successfully bound
        # - 1 when a UDS is successfully bound
        res = self.server.add_insecure_port(server_address)
        if socket and res != 1:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(socket)
        if port and res != port:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(port)