Code Example #1
File: stats.py Project: helloworld/dagster
 def __new__(
     cls,
     start_time=None,
     end_time=None,
 ):
     return super(RunStepMarker, cls).__new__(
         cls,
         start_time=check.opt_float_param(start_time, "start_time"),
         end_time=check.opt_float_param(end_time, "end_time"),
     )
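Example #1 shows the pattern most of the snippets on this page repeat: a named tuple whose __new__ validates optional float fields with check.opt_float_param, so each field is guaranteed to be either a float or None once the object is constructed. A minimal, self-contained sketch of that shape follows; the class and field names are illustrative, and the import comment states an assumption about the installed dagster release.

from typing import NamedTuple, Optional

from dagster import check  # assumption: older releases expose check at the top level; newer ones use dagster._check

class Marker(NamedTuple("_Marker", [("start_time", Optional[float]), ("end_time", Optional[float])])):
    def __new__(cls, start_time=None, end_time=None):
        # Floats and None pass through; any other type raises a parameter check error.
        return super(Marker, cls).__new__(
            cls,
            start_time=check.opt_float_param(start_time, "start_time"),
            end_time=check.opt_float_param(end_time, "end_time"),
        )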
Code Example #2
    @staticmethod
    def float(value, label, description=None):
        '''Static constructor for a metadata entry containing float as
        :py:class:`FloatMetadataEntryData`.

        Args:
            value (Optional[float]): The float value contained by this metadata entry.
            label (str): Short display label for this metadata entry.
            description (Optional[str]): A human-readable description of this metadata entry.
        '''
        check.opt_float_param(value, 'value')
        return EventMetadataEntry(label, description,
                                  FloatMetadataEntryData(value))
Code Example #3
    def _add_filter_limit(self, query, before=None, after=None, limit=None):
        check.opt_float_param(before, "before")
        check.opt_float_param(after, "after")
        check.opt_int_param(limit, "limit")

        if before:
            query = query.where(JobTickTable.c.timestamp < utc_datetime_from_timestamp(before))
        if after:
            query = query.where(JobTickTable.c.timestamp > utc_datetime_from_timestamp(after))
        if limit:
            query = query.limit(limit)
        return query
Code Example #4
File: fetch_assets.py Project: amarrella/dagster
def get_asset_events(graphene_info, asset_key, partitions=None, limit=None, before_timestamp=None):
    check.inst_param(asset_key, "asset_key", AssetKey)
    check.opt_int_param(limit, "limit")
    check.opt_float_param(before_timestamp, "before_timestamp")
    instance = graphene_info.context.instance
    event_records = instance.get_event_records(
        EventRecordsFilter(
            event_type=DagsterEventType.ASSET_MATERIALIZATION,
            asset_key=asset_key,
            asset_partitions=partitions,
            before_timestamp=before_timestamp,
        ),
        limit=limit,
    )
    return [event_record.event_log_entry for event_record in event_records]
Code Example #5
 def __init__(self, base_dir, polling_timeout=None, inst_data=None):
     self._base_dir = base_dir
     self._polling_timeout = check.opt_float_param(
         polling_timeout, "polling_timeout", DEFAULT_WATCHDOG_POLLING_TIMEOUT
     )
     self._subscription_manager = LocalComputeLogSubscriptionManager(self)
     self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
Code Example #6
File: types.py Project: nuruladroady/dagster
 def __new__(
     cls,
     repository_origin,
     instance_ref,
     schedule_name,
     schedule_execution_data_mode,
     scheduled_execution_timestamp=None,
     scheduled_execution_timezone=None,
 ):
     return super(ExternalScheduleExecutionArgs, cls).__new__(
         cls,
         repository_origin=check.inst_param(
             repository_origin, "repository_origin", RepositoryOrigin
         ),
         instance_ref=check.inst_param(instance_ref, "instance_ref", InstanceRef),
         schedule_name=check.str_param(schedule_name, "schedule_name"),
         schedule_execution_data_mode=check.inst_param(
             schedule_execution_data_mode,
             "schedule_execution_data_mode",
             ScheduleExecutionDataMode,
         ),
         scheduled_execution_timestamp=check.opt_float_param(
             scheduled_execution_timestamp, "scheduled_execution_timestamp"
         ),
         scheduled_execution_timezone=check.opt_str_param(
             scheduled_execution_timezone, "scheduled_execution_timezone",
         ),
     )
Code Example #7
File: controller.py Project: pawelad/dagster
def get_daemon_status(instance, daemon_type, curr_time_seconds=None, ignore_errors=False):
    curr_time_seconds = check.opt_float_param(
        curr_time_seconds, "curr_time_seconds", default=pendulum.now("UTC").float_timestamp
    )

    # check if daemon required
    if daemon_type not in instance.get_required_daemon_types():
        return DaemonStatus(
            daemon_type=daemon_type, required=False, healthy=None, last_heartbeat=None
        )

    # check if daemon present
    heartbeats = instance.get_daemon_heartbeats()
    if daemon_type not in heartbeats:
        return DaemonStatus(
            daemon_type=daemon_type, required=True, healthy=False, last_heartbeat=None
        )

    # check if daemon has sent a recent heartbeat
    latest_heartbeat = heartbeats[daemon_type]
    heartbeat_timestamp = latest_heartbeat.timestamp
    maximum_tolerated_time = (
        heartbeat_timestamp + DAEMON_HEARTBEAT_INTERVAL_SECONDS + DAEMON_HEARTBEAT_TOLERANCE_SECONDS
    )
    healthy = curr_time_seconds <= maximum_tolerated_time

    if not ignore_errors and latest_heartbeat.errors:
        healthy = False

    return DaemonStatus(
        daemon_type=daemon_type,
        required=True,
        healthy=healthy,
        last_heartbeat=heartbeats[daemon_type],
    )
Code Example #8
 def __new__(
     cls,
     node: Dict[str, Any],
     error: Optional[str] = None,
     status: Optional[Union[str, int]] = None,
     execution_time: Optional[float] = None,
     thread_id: Optional[str] = None,
     step_timings: List[StepTiming] = None,
     table: Optional[Dict[str, Any]] = None,
     fail: Optional[Any] = None,
     warn: Optional[Any] = None,
     skip: Optional[Any] = None,
 ):
     step_timings = check.list_param(step_timings, "step_timings", of_type=StepTiming)
     return super().__new__(
         cls,
         check.dict_param(node, "node", key_type=str),
         check.opt_str_param(error, "error"),
         status,
         check.opt_float_param(execution_time, "execution_time"),
         check.opt_str_param(thread_id, "thread_id"),
         step_timings,
         check.opt_dict_param(table, "table"),
         fail,
         warn,
         skip,
     )
Code Example #9
 def __new__(cls, last_tick_timestamp=None, last_run_key=None, min_interval=None, cursor=None):
     return super(SensorJobData, cls).__new__(
         cls,
         check.opt_float_param(last_tick_timestamp, "last_tick_timestamp"),
         check.opt_str_param(last_run_key, "last_run_key"),
         check.opt_int_param(min_interval, "min_interval"),
         check.opt_str_param(cursor, "cursor"),
     )
Code Example #10
File: sensor.py Project: yingjiebyron/dagster
 def __init__(self, instance, last_completion_time, last_run_key):
     super(SensorExecutionContext, self).__init__(
         check.inst_param(instance, "instance", DagsterInstance),
     )
     self._last_completion_time = check.opt_float_param(
         last_completion_time, "last_completion_time"
     )
     self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")
Code Example #11
File: job.py Project: yingjiebyron/dagster
 def __new__(cls, cron_schedule, start_timestamp=None):
     return super(ScheduleJobData, cls).__new__(
         cls,
         check.str_param(cron_schedule, "cron_schedule"),
         # Time in UTC at which the user started running the schedule (distinct from
         # `start_date` on partition-based schedules, which is used to define
         # the range of partitions)
         check.opt_float_param(start_timestamp, "start_timestamp"),
     )
Code Example #12
File: active.py Project: ericbellet/dagster
    def mark_up_for_retry(self, step_key, at_time=None):
        check.invariant(
            not self._retries.disabled,
            "Attempted to mark {} as up for retry but retries are disabled".format(step_key),
        )
        check.opt_float_param(at_time, "at_time")

        # if retries are enabled - queue this back up
        if self._retries.enabled:
            if at_time:
                self._waiting_to_retry[step_key] = at_time
            else:
                self._pending[step_key] = self._plan.execution_deps()[step_key]

        elif self._retries.deferred:
            self._completed.add(step_key)

        self._retries.mark_attempt(step_key)
        self._in_flight.remove(step_key)
Code Example #13
File: pipeline_run.py Project: keyz/dagster
 def __new__(
     cls,
     storage_id,
     pipeline_run,
     create_timestamp,
     update_timestamp,
     start_time=None,
     end_time=None,
 ):
     return super(RunRecord, cls).__new__(
         cls,
         storage_id=check.int_param(storage_id, "storage_id"),
         pipeline_run=check.inst_param(pipeline_run, "pipeline_run", PipelineRun),
         create_timestamp=check.inst_param(create_timestamp, "create_timestamp", datetime),
         update_timestamp=check.inst_param(update_timestamp, "update_timestamp", datetime),
         # start_time and end_time fields will be populated once the run has started and ended, respectively, but will be None beforehand.
         start_time=check.opt_float_param(start_time, "start_time"),
         end_time=check.opt_float_param(end_time, "end_time"),
     )
Code Example #14
    def get_job_ticks(self, job_origin_id, before=None, after=None, limit=None):
        check.str_param(job_origin_id, "job_origin_id")
        check.opt_float_param(before, "before")
        check.opt_float_param(after, "after")
        check.opt_int_param(limit, "limit")

        query = (
            db.select([JobTickTable.c.id, JobTickTable.c.tick_body])
            .select_from(JobTickTable)
            .where(JobTickTable.c.job_origin_id == job_origin_id)
            .order_by(JobTickTable.c.id.desc())
        )

        query = self._add_filter_limit(query, before=before, after=after, limit=limit)

        rows = self.execute(query)
        return list(
            map(lambda r: InstigatorTick(r[0], deserialize_json_to_dagster_namedtuple(r[1])), rows)
        )
Code Example #15
    def __init__(self, instance_ref, last_completion_time, last_run_key):
        self._exit_stack = ExitStack()
        self._instance = None

        self._instance_ref = check.inst_param(instance_ref, "instance_ref",
                                              InstanceRef)
        self._last_completion_time = check.opt_float_param(
            last_completion_time, "last_completion_time")
        self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")

        self._instance = None
Code Example #16
File: stats.py Project: hhy5277/dagster
    def __new__(
        cls,
        run_id,
        step_key,
        status=None,
        start_time=None,
        end_time=None,
    ):
        check.opt_inst_param(status, 'status', DagsterEventType)
        # status may be None; only unwrap the enum value when it is present
        _status_str = status.value if status else None

        return super(RunStepKeyStatsSnapshot, cls).__new__(
            cls,
            run_id=check.str_param(run_id, 'run_id'),
            step_key=check.str_param(step_key, 'step_key'),
            status=_status_str,
            start_time=check.opt_float_param(start_time, 'start_time'),
            end_time=check.opt_float_param(end_time, 'end_time'),
        )
Code Example #17
File: snapshot_sensor.py Project: xaniasd/dagster
def sync_get_external_sensor_execution_data_grpc(api_client, instance,
                                                 repository_handle,
                                                 sensor_name,
                                                 last_completion_time):
    check.inst_param(repository_handle, "repository_handle", RepositoryHandle)
    check.str_param(sensor_name, "sensor_name")
    check.opt_float_param(last_completion_time, "last_completion_time")

    origin = repository_handle.get_external_origin()

    return check.inst(
        api_client.external_sensor_execution(
            sensor_execution_args=SensorExecutionArgs(
                repository_origin=origin,
                instance_ref=instance.get_ref(),
                sensor_name=sensor_name,
                last_completion_time=last_completion_time,
            )),
        (ExternalSensorExecutionData, ExternalSensorExecutionErrorData),
    )
Code Example #18
 def __new__(
     cls,
     run_id,
     steps_succeeded,
     steps_failed,
     materializations,
     expectations,
     start_time,
     end_time,
 ):
     return super(PipelineRunStatsSnapshot, cls).__new__(
         cls,
         run_id=check.str_param(run_id, "run_id"),
         steps_succeeded=check.int_param(steps_succeeded, "steps_succeeded"),
         steps_failed=check.int_param(steps_failed, "steps_failed"),
         materializations=check.int_param(materializations, "materializations"),
         expectations=check.int_param(expectations, "expectations"),
         start_time=check.opt_float_param(start_time, "start_time"),
         end_time=check.opt_float_param(end_time, "end_time"),
     )
Code Example #19
File: types.py Project: xaniasd/dagster
 def __new__(cls, repository_origin, instance_ref, sensor_name, last_completion_time):
     return super(SensorExecutionArgs, cls).__new__(
         cls,
         repository_origin=check.inst_param(
             repository_origin, "repository_origin", ExternalRepositoryOrigin
         ),
         instance_ref=check.inst_param(instance_ref, "instance_ref", InstanceRef),
         sensor_name=check.str_param(sensor_name, "sensor_name"),
         last_completion_time=check.opt_float_param(
             last_completion_time, "last_completion_time"
         ),
     )
Code Example #20
File: active.py Project: uttasarga9067/dagster
    def mark_up_for_retry(self, step_key: str, at_time: Optional[float] = None) -> None:
        check.invariant(
            not self._retries.disabled,
            "Attempted to mark {} as up for retry but retries are disabled".format(step_key),
        )
        check.opt_float_param(at_time, "at_time")

        # if retries are enabled - queue this back up
        if self._retries.enabled:
            if at_time:
                self._waiting_to_retry[step_key] = at_time
            else:
                self._pending[step_key] = self._plan.get_executable_step_deps()[step_key]

        elif self._retries.deferred:
            # do not attempt to execute again
            self._abandoned.add(step_key)

        self._retries.mark_attempt(step_key)

        self._mark_complete(step_key)
Code Example #21
File: controller.py Project: trevenrawr/dagster
def get_daemon_statuses(
    instance,
    daemon_types,
    curr_time_seconds=None,
    ignore_errors=False,
    heartbeat_interval_seconds=DEFAULT_HEARTBEAT_INTERVAL_SECONDS,
    heartbeat_tolerance_seconds=DEFAULT_DAEMON_HEARTBEAT_TOLERANCE_SECONDS,
):
    curr_time_seconds = check.opt_float_param(
        curr_time_seconds,
        "curr_time_seconds",
        default=pendulum.now("UTC").float_timestamp)

    daemon_statuses_by_type = {}
    heartbeats = instance.get_daemon_heartbeats()

    for daemon_type in daemon_types:
        # check if daemon is not required
        if daemon_type not in instance.get_required_daemon_types():
            daemon_statuses_by_type[daemon_type] = DaemonStatus(
                daemon_type=daemon_type,
                required=False,
                healthy=None,
                last_heartbeat=None)
        else:
            # check if daemon has a heartbeat
            if daemon_type not in heartbeats:
                daemon_statuses_by_type[daemon_type] = DaemonStatus(
                    daemon_type=daemon_type,
                    required=True,
                    healthy=False,
                    last_heartbeat=None)
            else:
                # check if daemon has sent a recent heartbeat
                latest_heartbeat = heartbeats[daemon_type]
                heartbeat_timestamp = latest_heartbeat.timestamp
                maximum_tolerated_time = (heartbeat_timestamp +
                                          heartbeat_interval_seconds +
                                          heartbeat_tolerance_seconds)
                healthy = curr_time_seconds <= maximum_tolerated_time

                if not ignore_errors and latest_heartbeat.errors:
                    healthy = False

                daemon_statuses_by_type[daemon_type] = DaemonStatus(
                    daemon_type=daemon_type,
                    required=True,
                    healthy=healthy,
                    last_heartbeat=heartbeats[daemon_type],
                )

    return daemon_statuses_by_type
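The health check in the two controller examples (#7 and #21) reduces to a single comparison: a daemon is healthy while the current time is no later than its last heartbeat plus the heartbeat interval plus the tolerance, and a fresh heartbeat that carries errors still counts as unhealthy unless ignore_errors is set. A small worked example of that arithmetic, with illustrative numbers rather than dagster's actual interval and tolerance constants:

# Illustrative values only; the real interval/tolerance constants are defined in dagster.
heartbeat_timestamp = 1_700_000_000.0   # epoch seconds of the last heartbeat
heartbeat_interval_seconds = 30
heartbeat_tolerance_seconds = 60
maximum_tolerated_time = heartbeat_timestamp + heartbeat_interval_seconds + heartbeat_tolerance_seconds

print(heartbeat_timestamp + 45.0 <= maximum_tolerated_time)    # True  -> healthy (within interval + tolerance)
print(heartbeat_timestamp + 120.0 <= maximum_tolerated_time)   # False -> heartbeat is considered stale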
Code Example #22
File: scheduler.py Project: jmsanders/dagster
    def __new__(cls, origin, status, cron_schedule, start_timestamp=None):

        return super(ScheduleState, cls).__new__(
            cls,
            # Using the term "origin" to leave flexibility in handling future types
            check.inst_param(origin, "origin", ScheduleOrigin),
            check.inst_param(status, "status", ScheduleStatus),
            check.str_param(cron_schedule, "cron_schedule"),
            # Time in UTC at which the user started running the schedule (distinct from
            # `start_date` on partition-based schedules, which is used to define
            # the range of partitions)
            check.opt_float_param(start_timestamp, "start_timestamp"),
        )
Code Example #23
    def get_ticks(self,
                  origin_id,
                  selector_id,
                  before=None,
                  after=None,
                  limit=None,
                  statuses=None):
        check.str_param(origin_id, "origin_id")
        check.opt_float_param(before, "before")
        check.opt_float_param(after, "after")
        check.opt_int_param(limit, "limit")
        check.opt_list_param(statuses, "statuses", of_type=TickStatus)

        base_query = (db.select([
            JobTickTable.c.id, JobTickTable.c.tick_body
        ]).select_from(JobTickTable).order_by(JobTickTable.c.timestamp.desc()))
        if self.has_instigators_table():
            query = base_query.where(
                db.or_(
                    JobTickTable.c.selector_id == selector_id,
                    db.and_(
                        JobTickTable.c.selector_id == None,
                        JobTickTable.c.job_origin_id == origin_id,
                    ),
                ))
        else:
            query = base_query.where(JobTickTable.c.job_origin_id == origin_id)

        query = self._add_filter_limit(query,
                                       before=before,
                                       after=after,
                                       limit=limit,
                                       statuses=statuses)

        rows = self.execute(query)
        return list(
            map(
                lambda r: InstigatorTick(
                    r[0], deserialize_json_to_dagster_namedtuple(r[1])), rows))
Code Example #24
File: test_check.py Project: shcheklein/dagster
def test_opt_float_param():
    assert check.opt_float_param(-1.0, 'param_name') == -1.0
    assert check.opt_float_param(0.0, 'param_name') == 0.0
    assert check.opt_float_param(1.1, 'param_name') == 1.1
    assert check.opt_float_param(None, 'param_name') is None

    with pytest.raises(ParameterCheckError):
        check.opt_float_param('s', 'param_name')
Code Example #25
File: test_check.py Project: iamahern/dagster
def test_opt_float_param():
    assert check.opt_float_param(-1.0, "param_name") == -1.0
    assert check.opt_float_param(0.0, "param_name") == 0.0
    assert check.opt_float_param(1.1, "param_name") == 1.1
    assert check.opt_float_param(None, "param_name") is None

    with pytest.raises(ParameterCheckError):
        check.opt_float_param("s", "param_name")
Code Example #26
File: instigation.py Project: helloworld/dagster
 def __new__(
     cls,
     last_tick_timestamp: Optional[float] = None,
     last_run_key: Optional[str] = None,
     min_interval: Optional[int] = None,
     cursor: Optional[str] = None,
 ):
     return super(SensorInstigatorData, cls).__new__(
         cls,
         check.opt_float_param(last_tick_timestamp, "last_tick_timestamp"),
         check.opt_str_param(last_run_key, "last_run_key"),
         check.opt_int_param(min_interval, "min_interval"),
         check.opt_str_param(cursor, "cursor"),
     )
Code Example #27
 def __new__(
     cls,
     logs: List[Dict[str, Any]],
     results: List[NodeResult],
     generated_at: str,
     elapsed_time: Optional[float] = None,
 ):
     return super().__new__(
         cls,
         check.list_param(logs, "logs", of_type=Dict),
         results,
         check.str_param(generated_at, "generated_at"),
         check.opt_float_param(elapsed_time, "elapsed_time"),
     )
Code Example #28
    def _add_filter_limit(self,
                          query,
                          before=None,
                          after=None,
                          limit=None,
                          statuses=None):
        check.opt_float_param(before, "before")
        check.opt_float_param(after, "after")
        check.opt_int_param(limit, "limit")
        check.opt_list_param(statuses, "statuses", of_type=TickStatus)

        if before:
            query = query.where(
                JobTickTable.c.timestamp < utc_datetime_from_timestamp(before))
        if after:
            query = query.where(
                JobTickTable.c.timestamp > utc_datetime_from_timestamp(after))
        if limit:
            query = query.limit(limit)
        if statuses:
            query = query.where(
                JobTickTable.c.status.in_(
                    [status.value for status in statuses]))
        return query
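In both _add_filter_limit variants, before and after arrive as epoch-second floats and are converted to timezone-aware UTC datetimes before being compared against the timestamp column. utc_datetime_from_timestamp is dagster's own helper; a rough standard-library equivalent, shown only for illustration, looks like this:

from datetime import datetime, timezone

def epoch_to_utc(ts: float) -> datetime:
    # Roughly what utc_datetime_from_timestamp does: epoch seconds -> aware UTC datetime.
    return datetime.fromtimestamp(ts, tz=timezone.utc)

cutoff = epoch_to_utc(1_700_000_000.0)
# e.g. query = query.where(JobTickTable.c.timestamp < cutoff)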
Code Example #29
    def __init__(
        self,
        instance_ref: InstanceRef,
        last_completion_time: Optional[float],
        last_run_key: Optional[str],
        cursor: Optional[str],
    ):
        self._exit_stack = ExitStack()
        self._instance = None

        self._instance_ref = check.inst_param(instance_ref, "instance_ref",
                                              InstanceRef)
        self._last_completion_time = check.opt_float_param(
            last_completion_time, "last_completion_time")
        self._last_run_key = check.opt_str_param(last_run_key, "last_run_key")
        self._cursor = check.opt_str_param(cursor, "cursor")

        self._instance = None
Code Example #30
 def __init__(
     self,
     step_handler: StepHandler,
     retries: RetryMode = RetryMode.DISABLED,
     sleep_seconds: Optional[float] = None,
     check_step_health_interval_seconds: Optional[int] = None,
 ):
     self._step_handler = step_handler
     self._retries = retries
     self._sleep_seconds = cast(
         float,
         check.opt_float_param(sleep_seconds, "sleep_seconds", default=0.1))
     self._check_step_health_interval_seconds = cast(
         int,
         check.opt_int_param(check_step_health_interval_seconds,
                             "check_step_health_interval_seconds",
                             default=20),
     )
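One detail worth noting in the last example: with a non-None default, check.opt_float_param can never return None at runtime, but its declared return type is presumably still Optional[float], so the surrounding cast(float, ...) is what narrows the type for static checkers. The same idiom with a hypothetical Optional-returning helper:

from typing import Optional, cast

def opt_setting(value: Optional[float], default: float) -> Optional[float]:
    # Hypothetical helper that mirrors the opt_*_param shape.
    return default if value is None else value

sleep_seconds: float = cast(float, opt_setting(None, 0.1))  # narrows Optional[float] to float for the type checker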