Example #1
def test_patch_timeout_bigger_than_0(
         self, service: Service, timeout: timedelta
 ) -> None:
     body = {'timeout': timeout.total_seconds()}
     self._send_patch_request(service, body)
     self.assertEqual(
         floor(timeout.total_seconds()), service.timeout.total_seconds()
     )
Example #2
    def fetch_rows(self, column_names: Tuple, interval: timedelta) -> List[Tuple]:
        str_columns: str = ''
        column_names = ("Timestamp",) + column_names
        for str_column in column_names:
            if not str_column:
                continue
            if str_columns:
                str_columns += ', '
            str_columns += str_column
        str_query: str = (
            'SELECT {} '
            'FROM `{}.{}` '
            'WHERE Timestamp > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL {} SECOND)'
        ).format(str_columns, self.bot.bq.dataset_id, self.telemetry_table_id, int(interval.total_seconds()))

        print('MonitoringTelegramBot: About to execute query: "{}"'.format(str_query))
        job: bigquery.job.QueryJob = self.bot.bq.client.query(str_query, location=self.bot.bq.location)

        result: List[Tuple] = []
        for row in job.result():
            columns: Tuple = ()
            for str_column in column_names:
                if not str_column:
                    columns += (None,)
                else:
                    columns += (row.get(str_column),)
            result.append(columns)

        result.sort(key=lambda x: x[0])
        # for r in result:
        #     print(r)
        return result
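A possible invocation, sketched under assumptions (the surrounding bot class and table wiring are not shown above; the `telemetry` instance is hypothetical):

    # Hypothetical: fetch the last hour of two telemetry columns.
    # Rows come back sorted by the implicit leading "Timestamp" column.
    rows = telemetry.fetch_rows(("Temperature", "Humidity"), timedelta(hours=1))
    for timestamp, temperature, humidity in rows:
        print(timestamp, temperature, humidity)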
Example #3
 def test_that_setting_valid_timeout_changes_it(
         self, timeout: timedelta
 ):
     self.service.timeout = timeout
     self.assertEqual(
         timeout.total_seconds(), self.service.timeout.total_seconds()
     )
Example #4
 def validate_timeout(self, timeout: timedelta):
     if timeout.total_seconds() < 0:
         raise ValidationError(
             'Attempted to set a negative timeout duration'
         )
     else:
         return True
Example #5
def get_cool_off_iso8601(delta: timedelta) -> str:
    """
    Return datetime.timedelta translated to ISO 8601 formatted duration for use in e.g. cool offs.
    """

    seconds = delta.total_seconds()
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    days_str = f'{days:.0f}D' if days else ''

    time_str = ''.join(
        f'{value:.0f}{designator}'
        for value, designator
        in [
            [hours, 'H'],
            [minutes, 'M'],
            [seconds, 'S'],
        ]
        if value
    )

    if time_str:
        return f'P{days_str}T{time_str}'
    return f'P{days_str}'
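A quick sanity check of the output format (my own illustration, not from the source):

    from datetime import timedelta

    assert get_cool_off_iso8601(timedelta(minutes=30)) == 'PT30M'
    assert get_cool_off_iso8601(timedelta(days=1, hours=2)) == 'P1DT2H'
    # Edge case: a zero timedelta yields the bare string 'P'.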
Example #6
def modulo_timedelta(dt: datetime, td: timedelta) -> timedelta:
    """
    Takes a datetime to perform modulo on and a timedelta.
    :returns: dt % td
    """
    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
    return timedelta(seconds=((dt - today).total_seconds() % td.total_seconds()))
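A usage sketch (my own illustration); note that the function measures dt against today's local midnight, so the result is only meaningful when dt falls on the current day:

    from datetime import datetime, timedelta

    # How far past the most recent 15-minute boundary is `now`?
    now = datetime.now()
    remainder = modulo_timedelta(now, timedelta(minutes=15))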
Example #7
def timedelta_repr(td: datetime.timedelta) -> str:
    """
    :returns: a human readable representation of the provided timedelta object
    """
    assert isinstance(td, datetime.timedelta), type(td)
    ZERO = {'00', '0'}

    parts = str(td).split(':')

    end = []
    if parts[0] not in ZERO:
        end.append('{} hours'.format(parts[0]))

    if parts[1] not in ZERO:
        end.append('{} minutes'.format(parts[1]))

    if parts[2] not in ZERO:
        end.append('{} seconds'.format(parts[2]))

    if len(end) > 1:
        end.append('and ' + end.pop(-1))

    return ', '.join(
        val.lstrip('0')
        for val in end
    )
Example #8
 def schedule_next(self, delay: datetime.timedelta):
     self._next_expected = datetime.datetime.now() + delay
     if self.scheduler is None:
         delay_secs = delay.total_seconds()
         default_run_worker(delay_secs, self)
     else:
         self.scheduler.add(delay, self)
Example #9
    def mark_as_failed(self, identifier: str, requeue_delay: timedelta=timedelta(0)):
        self._queue.mark_finished(identifier)
        self._queue.mark_dirty(identifier, requeue_delay)
        logging.debug('%s has been marked as failed', identifier)

        # Broadcast the change after the requeue delay
        # FIXME? Timer's interval may not be 100% accurate and may also
        # not correspond with the database server; this could go out of
        # synch... Add a tolerance??
        Timer(requeue_delay.total_seconds(), self._broadcast).start()
Example #10
 def from_python(cls, value: timedelta):
     if value is None:
         return Null.from_python()
     data_type = cls.build_c_type()
     data_object = data_type()
     data_object.type_code = int.from_bytes(
         cls.type_code,
         byteorder=PROTOCOL_BYTE_ORDER
     )
     data_object.value = int(value.total_seconds() * 1000)
     return bytes(data_object)
Example #11
    def __init__(self, payload_factory:Callable[..., Iterable], latency:timedelta):
        super().__init__()

        self._discharge_latency = latency.total_seconds()

        self._lock = Lock()
        self._payload = payload_factory()

        # Start the watcher
        self._watcher_thread = Thread(target=self._watcher, daemon=True)
        self._watching = True
        self._watcher_thread.start()
Example #12
def batch_raw_query(prometheus_endpoint: ParseResult,
                    start_timestamp: int,
                    end_timestamp: int,
                    step: datetime.timedelta,
                    query: str,
                    maxpts=11000) -> Iterable[bytes]:
    """Retrieve metrics from a Prometheus database"""
    sstep = '{}s'.format(int(step.total_seconds()))
    url = urljoin(prometheus_endpoint.geturl(), 'api/v1/query_range')

    def sub(sub_start, sub_end):
        """sub"""
        payload = [('start', sub_start),
                   ('end', sub_end),
                   ('step', sstep),
                   ('query', query)]
        req = requests.get(url, params=payload)
        return req.content
    delta = end_timestamp - start_timestamp
    batch_size = min(delta // int(step.total_seconds()), maxpts)  # type: int
    for limits in _create_batches(start_timestamp, end_timestamp, batch_size):
        sub_start, sub_end = limits
        yield sub(sub_start, sub_end)
Example #13
    def mark_as_failed(self, identifier: str, requeue_delay: timedelta=timedelta(0)):
        if identifier not in self._known_data:
            raise ValueError("Not known: %s" % identifier)
        with self._lists_lock:
            self._assert_is_being_processed(identifier)
            self._processing.remove(identifier)
            self._failed.append(identifier)

        if requeue_delay is not None:
            if requeue_delay.total_seconds() == 0:
                self._reprocess(identifier)
            else:
                end_time = self._get_time() + requeue_delay.total_seconds()

                def on_delay_end():
                    if timer in self._timers[end_time]:
                        self._timers[end_time].remove(timer)
                        self._reprocess(identifier)

                timer = Timer(requeue_delay.total_seconds(), on_delay_end)
                self._timers[end_time].append(timer)
                timer.start()
        else:
            self._on_complete(identifier)
Example #14
 def time_ago(interval: timedelta) -> str:
     ago_string = ''
     s = interval.total_seconds()
     if (s >= 31536000):
         return "{0:-4.1f} years".format(s/31536000)
     elif (s >= 2628000):
         return "{0:-4.1f} months".format(s/2628000)
     elif (s >= 604800):
         return "{0:-4.1f} weeks".format(s/604800)
     elif (s >= 86400):
         return "{0:-4.1f} days".format(s/86400)
     elif (s >= 3600):
         return "{0:-4.1f} hours".format(s/3600)
     elif (s >= 60):
         return "{0:-4.1f} minutes".format(s/60)
     else:
         return "{0:-4.1f} seconds".format(s)
    def format_timestamp(time: datetime.timedelta) -> str:
        """
        Convert timedelta to hh:mm:ss.mmm
        https://matroska.org/technical/specs/subtitles/srt.html

        :param time: Timedelta
        :return: Formatted time string
        """
        days, seconds = divmod(time.total_seconds(), 24 * 60 * 60)
        hours, seconds = divmod(seconds, 60 * 60)
        minutes, seconds = divmod(seconds, 60)
        milliseconds = int((seconds - int(seconds)) * 1000)

        # Floor seconds and merge days to hours
        seconds = int(seconds)
        hours += days * 24

        return f'{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d},{milliseconds:03d}'
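A quick check of the SRT timestamp format (my own illustration):

    from datetime import timedelta

    assert format_timestamp(timedelta(seconds=3661.5)) == '01:01:01,500'
    # Days are merged into the hour field rather than overflowing:
    assert format_timestamp(timedelta(days=1, minutes=2)) == '24:02:00,000'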
Example #16
def td_format(td_object: timedelta):
    seconds = int(td_object.total_seconds())
    periods = [
        ("year", 60 * 60 * 24 * 365),
        ("month", 60 * 60 * 24 * 30),
        ("day", 60 * 60 * 24),
        ("hour", 60 * 60),
        ("minute", 60),
        ("second", 1),
    ]

    strings = []
    for period_name, period_seconds in periods:
        if seconds >= period_seconds:  # >= so exact boundaries (e.g. exactly one day) use the larger unit
            period_value, seconds = divmod(seconds, period_seconds)
            has_s = "s" if period_value > 1 else ""
            strings.append("%s %s%s" % (period_value, period_name, has_s))

    return ", ".join(strings)
Example #17
    def __init__(self, couchdb_url:str, couchdb_name:str, buffer_capacity:int = 1000,
                                                          buffer_latency:timedelta = timedelta(milliseconds=50),
                                                          **kwargs):
        """
        Constructor: Initialise the database interfaces

        @param  couchdb_url      CouchDB URL
        @param  couchdb_name     Database name
        @param  buffer_capacity  Buffer capacity
        @param  buffer_latency   Buffer latency
        """
        super().__init__()
        self._sofa = Sofabed(couchdb_url, couchdb_name, buffer_capacity, buffer_latency, **kwargs)
        self._queue = _Bert(self._sofa)
        self._metadata = _Ernie(self._sofa)

        self._queue_lock = CountingLock()
        self._pending_cache = deque()

        self._latency = buffer_latency.total_seconds()
Example #18
File: util.py Project: irmen/Tale
def duration_display(duration: datetime.timedelta) -> str:
    secs = duration.total_seconds()
    if secs == 0:
        return "no time at all"
    hours, secs = divmod(secs, 3600)
    minutes, secs = divmod(secs, 60)
    result = []
    if hours == 1:
        result.append("1 hour")
    elif hours > 1:
        result.append("%d hours" % hours)
    if minutes == 1:
        result.append("1 minute")
    elif minutes > 1:
        result.append("%d minutes" % minutes)
    if secs == 1:
        result.append("1 second")
    elif secs > 1:
        result.append("%d seconds" % secs)
    return lang.join(result)
Example #19
def iso8601(delta: timedelta) -> str:
    """
    Return datetime.timedelta translated to ISO 8601 formatted duration.
    """

    seconds = delta.total_seconds()
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)

    date = '{:.0f}D'.format(days) if days else ''

    time_values = hours, minutes, seconds
    time_designators = 'H', 'M', 'S'

    time = ''.join([
        ('{:.0f}'.format(value) + designator)
        for value, designator in zip(time_values, time_designators)
        if value]
    )
    return 'P' + date + ('T' + time if time else '')
Example #20
def _format_timedelta(delta: timedelta):
    total_seconds = delta.total_seconds()
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours)}:{int(minutes):02}:{int(seconds):02}"
 def delta_to_json(delta: timedelta) -> str:
     parts = str(delta.total_seconds()).split(".")
     if len(parts) > 1:
         while len(parts[1]) not in [3, 6, 9]:
             parts[1] = parts[1] + "0"
     return ".".join(parts) + "s"
Example #22
 def value_to_json(cls, value: timedelta):
     if type(value) is not timedelta:
         raise TypeError(f'{value} must be a datetime.timedelta.')
     else:
         return str(value.total_seconds())
Example #23
def _time_attribute_string(sum_of_time_for_cases: datetime.timedelta) -> str:
    return '%f' % sum_of_time_for_cases.total_seconds()
Example #24
def timedelta_to_mesh_tai_utc_delta(time_zone: timedelta) -> int:
    assert (time_zone.total_seconds().is_integer())
    return int(time_zone.total_seconds() + TAI_UTC_DELTA_ZERO)
Example #25
def hours(td: timedelta):
    secs = td.total_seconds()
    hours = round(secs / 3600.0)
    return hours
Example #26
 def filter_toelapsed(self, input: datetime.timedelta) -> str:
     return '%ds ago' % (input.total_seconds())
Example #27
def render_timedelta(delta: datetime.timedelta):
    hours, minutes = divmod(delta.total_seconds() / 60, 60)
    hours = str(int(hours))
    minutes = str(int(minutes)).zfill(2)
    return f"{hours}:{minutes}"
Example #28
 def elapsed_time(self, new_value: timedelta):
     self.response_time = new_value.total_seconds() * 1000
Example #29
 def _parse_time_delta(timedelta: datetime.timedelta) -> int:
     return int(timedelta.total_seconds())
Example #30
def dump_timedelta(x: datetime.timedelta) -> int:
    """Convert a timedelta to an int."""
    return int(x.total_seconds())
Example #31
    def serialize(self, obj: timedelta) -> Any:
        if not isinstance(obj, timedelta):
            raise SerializationError(val=obj)

        return obj.total_seconds()
Example #32
 def _call_later(delay: datetime.timedelta, callback):
     IOLoop.current().call_later(delay=delay.total_seconds(),
                                 callback=callback)
Example #33
def elapsed_time_value_and_unit(td: datetime.timedelta) -> (str, str):
    s = '%f' % td.total_seconds()
    return (s, 's')
Example #34
 def _call_later(delay: datetime.timedelta, callback):
     asyncio.get_event_loop().call_later(delay=delay.total_seconds(),
                                         callback=callback)
Example #35
def execute(working_dir: str, frequency: str, duration: timedelta, sh=sh):
    raw_path = os.path.join(working_dir, "signal.raw")
    signal_path = os.path.join(working_dir, "signal.wav")
    product_path = os.path.join(working_dir, "product.png")
    log_path = os.path.join(working_dir, "session.log")

    sample_rate = 48000

    # Let's log the operations done by the tools to a log file. We need to flush it
    # frequently, because this file stream is also used to capture the tools' output.
    # Without flushing, the logging order gets completely messed up.
    logfile = open(log_path, "w")
    logfile.write("---rtl_fm log-------\n")
    logfile.flush()

    # Run rtl_fm/rx_fm - this records the actual samples from the RTL device
    with suppress(sh.TimeoutException):
        sh.rtl_fm(
            # Specify frequency (in Hz, e.g. 137MHz)
            "-f",
            frequency,
            # Specify sampling rate (e.g. 48000 Hz)
            "-s",
            sample_rate,
            # Maximal possible gain value; probably wrong for SDRs other than rtl-sdr
            "-g",
            49.6,
            # PPM frequency correction; value copy-pasted from an example found online
            "-p",
            1,
            # Higher quality downsampling - possible values are 0 or 9; 9 is experimental.
            "-F",
            9,
            # Enable bias-T
            "-T",
            # How arctan is computed. We don't test other options.
            "-A",
            "fast",
            # dc blocking filter (?)
            "-E",
            "DC",
            # Output to pipe, optional in this command
            raw_path,
            _timeout=duration.total_seconds(),
            _timeout_signal=signal.SIGTERM,

            # rtl_fm and rx_fm both print messages on stderr
            _err=logfile)
    logfile.flush()

    logfile.write("---sox log-------\n")
    logfile.flush()

    # Run sox - this converts the raw samples into an audible WAV
    sh.sox(  # Type of input
        "-t",
        "raw",
        # Sample size in bits
        "-b16",
        # Signed integer encoding
        "-es",
        "-r",
        sample_rate,
        # Number of channels of audio data - 1 - mono
        "-c1",
        # Verbosity level (0 - silence, 1 - failure messages, 2 - warnings, 3 - processing phases, 4 - debug)
        "-V3",
        # Read from the raw file (instead of stdin via pipe)
        raw_path,
        # Output path
        signal_path,
        # Resampling rate
        "rate",
        "11025",
        _out=logfile)
    logfile.flush()

    logfile.write("---noaa-apt log-------\n")
    logfile.flush()

    # Run noaa_apt - this decodes APT from the audio file into a PNG image.
    sh.noaa_apt("-o",
                product_path,
                "--false-color",
                "--contrast",
                "telemetry",
                signal_path,
                _out=logfile)
    logfile.flush()
    logfile.close()

    return [("SIGNAL", signal_path), ("PRODUCT", product_path),
            ("LOG", log_path), ("RAW", raw_path)]
Example #36
def check_and_update_authn_fresh(
    within: datetime.timedelta,
    grace: datetime.timedelta,
    method: t.Optional[str] = None,
) -> bool:
    """Check if user authenticated within specified time and update grace period.

    :param within: A timedelta specifying the maximum time in the past that the caller
                  authenticated that is still considered 'fresh'.
    :param grace: A timedelta that, if the current session is considered 'fresh'
                  will set a grace period for which freshness won't be checked.
                  The intent here is that the caller shouldn't get part-way through
                  a set of operations and suddenly be required to authenticate again.
    :param method: Optional - if set and == "basic" then will always return True.
                  (since basic-auth sends username/password on every request)

    If within.total_seconds() is negative, will always return True (always 'fresh').
    This effectively just disables this entire mechanism.

    If "fs_gexp" is in the session and the current timestamp is less than that,
    return True and extend grace time (i.e. set fs_gexp to current time + grace).

    If not within the grace period, and within.total_seconds() is 0,
    return False (not fresh).

    Be aware that for this to work, sessions and therefore session cookies
    must be functioning and being sent as part of the request. If the required
    state isn't in the session cookie then return False (not 'fresh').

    .. warning::
        Be sure the caller is already authenticated PRIOR to calling this method.

    .. versionadded:: 3.4.0

    .. versionchanged:: 4.0.0
        Added `method` parameter.
    """

    if method == "basic":
        return True

    if within.total_seconds() < 0:
        # this means 'always fresh'
        return True

    if "fs_paa" not in session:
        # No session, you can't play.
        return False

    now = datetime.datetime.utcnow()
    new_exp = now + grace
    grace_ts = int(new_exp.timestamp())

    fs_gexp = session.get("fs_gexp", None)
    if fs_gexp:
        if now.timestamp() < fs_gexp:
            # Within grace period - extend it, and we're good.
            session["fs_gexp"] = grace_ts
            return True

    # Special case 0 - return False always, but set grace period.
    if within.total_seconds() == 0:
        session["fs_gexp"] = grace_ts
        return False

    authn_time = datetime.datetime.utcfromtimestamp(session["fs_paa"])
    # allow for some time drift where it's possible authn_time is in the future
    # but let's be cautious and not allow arbitrary future times
    delta = now - authn_time
    if within > delta > -within:
        session["fs_gexp"] = grace_ts
        return True
    return False
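A hedged usage sketch inside a Flask view (the route, app wiring, and endpoint name are illustrative, not from the source):

    from datetime import timedelta
    from flask import redirect, url_for

    @app.route('/change-password')
    def change_password():
        # Require authentication within the last hour; once considered fresh,
        # skip re-checking for a 10-minute grace window.
        if not check_and_update_authn_fresh(timedelta(hours=1), timedelta(minutes=10)):
            return redirect(url_for('security.verify'))  # hypothetical endpoint
        ...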
Example #37
 def _apply_timeout(
         method_timeout: datetime.timedelta,
         future: asyncio.Future) -> asyncio.Future:  # type: ignore
     return asyncio.wait_for(future, method_timeout.total_seconds())
Example #38
 def pos(date: timedelta):
     return int(round(date.total_seconds() * rate))
Example #39
 def fromTimeDelta(cls, td: timedelta):
     return cls(seconds=td.total_seconds())
Example #40
 def assert_timedeltas_almost_equal(self,
                                    td1: timedelta,
                                    td2: timedelta,
                                    places: int = 4):
     self.assertAlmostEqual(td1.total_seconds(), td2.total_seconds(),
                            places)
Example #41
 def _unstructure_timedelta(obj: timedelta) -> float:
     return obj.total_seconds()
Example #42
def improve_implementation(
        impl              : Implementation,
        timeout           : datetime.timedelta = datetime.timedelta(seconds=60),
        progress_callback : Callable[[Implementation], Any] = None,
        improve_count     : Value = None) -> Implementation:
    """Improve an implementation.

    This function tries to synthesize a better version of the given
    implementation. It returns the best version found within the given timeout.

    If provided, progress_callback will be called whenever a better
    implementation is found.  It will be given the better implementation, which
    it should not modify or cache.
    """

    start_time = datetime.datetime.now()

    # we statefully modify `impl`, so let's make a defensive copy which we will modify instead
    impl = impl.safe_copy()

    # worker threads ("jobs"), one per query
    improvement_jobs = []

    with jobs.SafeQueue() as solutions_q:

        def stop_jobs(js):
            """Stop the given jobs and remove them from `improvement_jobs`."""
            js = list(js)
            jobs.stop_jobs(js)
            for j in js:
                improvement_jobs.remove(j)

        def reconcile_jobs():
            """Sync up the current set of jobs and the set of queries.

            This function spawns new jobs for new queries and cleans up old
            jobs whose queries have been dead-code-eliminated."""

            # figure out what new jobs we need
            job_query_names  = set(j.q.name for j in improvement_jobs)
            new = []
            for q in impl.query_specs:
                if q.name not in job_query_names:
                    states_maintained_by_q = impl.states_maintained_by(q)
                    new.append(ImproveQueryJob(
                        impl.abstract_state,
                        list(impl.spec.assumptions) + list(q.assumptions),
                        q,
                        context=impl.context_for_method(q),
                        k=(lambda q: lambda new_rep, new_ret: solutions_q.put((q, new_rep, new_ret)))(q),
                        hints=[EStateVar(c).with_type(c.type) for c in impl.concretization_functions.values()],
                        freebies=[e for (v, e) in impl.concretization_functions.items() if EVar(v) in states_maintained_by_q],
                        ops=impl.op_specs,
                        improve_count=improve_count))

            # figure out what old jobs we can stop
            impl_query_names = set(q.name for q in impl.query_specs)
            old = [j for j in improvement_jobs if j.q.name not in impl_query_names]

            # make it so
            stop_jobs(old)
            for j in new:
                j.start()
            improvement_jobs.extend(new)

        # start jobs
        reconcile_jobs()

        # wait for results
        timeout = Timeout(timeout)
        done = False
        while not done and not timeout.is_timed_out():
            for j in improvement_jobs:
                if j.done:
                    if j.successful:
                        j.join()
                    else:
                        print("failed job: {}".format(j), file=sys.stderr)
                        # raise Exception("failed job: {}".format(j))

            done = all(j.done for j in improvement_jobs)

            try:
                # list of (Query, new_rep, new_ret) objects
                results = solutions_q.drain(block=True, timeout=0.5)
            except Empty:
                continue

            # group by query name, favoring later (i.e. better) solutions
            print("updating with {} new solutions".format(len(results)))
            improved_queries_by_name = OrderedDict()
            killed = 0
            for r in results:
                q, new_rep, new_ret = r
                if q.name in improved_queries_by_name:
                    killed += 1
                improved_queries_by_name[q.name] = r
            if killed:
                print(" --> dropped {} worse solutions".format(killed))

            improvements = list(improved_queries_by_name.values())
            def index_of(l, p):
                if not isinstance(l, list):
                    l = list(l)
                for i in range(len(l)):
                    if p(l[i]):
                        return i
                return -1
            improvements.sort(key = lambda i: index_of(impl.query_specs, lambda qq: qq.name == i[0].name))
            print("update order:")
            for (q, _, _) in improvements:
                print("  --> {}".format(q.name))

            # update query implementations
            i = 1
            for (q, new_rep, new_ret) in improvements:
                if timeout.is_timed_out():
                    break

                print("considering update {}/{}...".format(i, len(improvements)))
                i += 1
                # The guard on the next line might be false!
                # It might so happen that:
                #   - a job found a better version for q
                #   - a different job found a better version of some other query X
                #   - both improvements were in the `results` list pulled from the queue
                #   - we visited the improvement for X first
                #   - after cleanup, q is no longer needed and was removed
                if q.name in [qq.name for qq in impl.query_specs]:
                    elapsed = datetime.datetime.now() - start_time
                    print("SOLUTION FOR {} AT {} [size={}]".format(q.name, elapsed, new_ret.size() + sum(proj.size() for (v, proj) in new_rep)))
                    print("-" * 40)
                    for (sv, proj) in new_rep:
                        print("  {} : {} = {}".format(sv.id, pprint(sv.type), pprint(proj)))
                    print("  return {}".format(pprint(new_ret)))
                    print("-" * 40)
                    impl.set_impl(q, new_rep, new_ret)

                    # clean up
                    impl.cleanup()
                    if progress_callback is not None:
                        progress_callback(impl)
                    reconcile_jobs()
                else:
                    print("  (skipped; {} was aleady cleaned up)".format(q.name))

        # stop jobs
        print("Stopping jobs")
        stop_jobs(list(improvement_jobs))
        return impl
Example #43
def formatTimeDelta(td: timedelta) -> str:
    total = td.total_seconds()
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
Example #44
def trunc_ts(ts: datetime, step: timedelta):
    base = datetime.min.replace(year=2000)
    step_s = step.total_seconds()
    seconds = (ts - base).total_seconds()
    seconds = int(seconds / step_s) * step_s
    return (base + timedelta(seconds=seconds, milliseconds=500)).replace(microsecond=0)
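A quick check of the truncation behaviour (my own illustration):

    from datetime import datetime, timedelta

    ts = datetime(2021, 6, 1, 10, 17, 42)
    assert trunc_ts(ts, timedelta(minutes=5)) == datetime(2021, 6, 1, 10, 15)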
Example #45
def to_seconds(t: timedelta) -> int:
    return int(t.total_seconds())
Example #46
def time_str(delta: timedelta):
    minutes, seconds = divmod(delta.total_seconds(), 60)
    seconds_string = f"{int(seconds)}s"
    if minutes > 0:
        return f"{int(minutes)}m {seconds_string}"
    return seconds_string
Example #47
 def async_set_delayed_turn_off(self, time_period: timedelta):
     """Set delay off. The unit is different per device."""
     yield from self._try_command(
         "Setting the delay off failed.",
         self._light.delay_off, time_period.total_seconds())
Example #48
def convert_timedelta_in(value: datetime.timedelta):
    """
    Converts the timedelta value being passed into sqlite.
    """
    return value.total_seconds()
Example #49
def TimeToTicks(value):
    """Converts a Time object to ticks."""
    timeStruct = TimeDelta(hours = value.hour, minutes = value.minute, seconds = value.second, microseconds = value.microsecond)
    timeDec = decimal.Decimal(str(timeStruct.total_seconds()))
    return (int((timeDec + time.timezone) * 10**abs(timeDec.as_tuple()[2])), abs(timeDec.as_tuple()[2]))
Example #50
    def get_metric_range_data(
        self,
        metric_name: str,
        label_config: dict = None,
        start_time: datetime = (datetime.now() - timedelta(minutes=10)),
        end_time: datetime = datetime.now(),
        chunk_size: timedelta = None,
        store_locally: bool = False,
        params: dict = None,
    ):
        r"""
        A method to get the current metric value for the specified metric and label configuration.

        :param metric_name: (str) The name of the metric.
        :param label_config: (dict) A dictionary specifying metric labels and their
            values.
        :param start_time:  (datetime) A datetime object that specifies the metric range start time.
        :param end_time: (datetime) A datetime object that specifies the metric range end time.
        :param chunk_size: (timedelta) Duration of metric data downloaded in one request. For
            example, setting it to timedelta(hours=3) will download 3 hours worth of data in each
            request made to the prometheus host
        :param store_locally: (bool) If set to True, will store data locally at,
            `"./metrics/hostname/metric_date/name_time.json.bz2"`
        :param params: (dict) Optional dictionary containing GET parameters to be
            sent along with the API request, such as "time"
        :return: (list) A list of metric data for the specified metric in the given time
            range
        :raises:
            (RequestException) Raises an exception in case of a connection error
            (PrometheusApiClientException) Raises in case of non 200 response status code

        """
        params = params or {}
        data = []

        _LOGGER.debug("start_time: %s", start_time)
        _LOGGER.debug("end_time: %s", end_time)
        _LOGGER.debug("chunk_size: %s", chunk_size)

        if not (isinstance(start_time, datetime)
                and isinstance(end_time, datetime)):
            raise TypeError(
                "start_time and end_time can only be of type datetime.datetime"
            )

        if not chunk_size:
            chunk_size = end_time - start_time
        if not isinstance(chunk_size, timedelta):
            raise TypeError(
                "chunk_size can only be of type datetime.timedelta")

        start = round(start_time.timestamp())
        end = round(end_time.timestamp())

        if (end_time -
                start_time).total_seconds() < chunk_size.total_seconds():
            sys.exit("specified chunk_size is too big")
        chunk_seconds = round(chunk_size.total_seconds())

        if label_config:
            label_list = [
                str(key + "=" + "'" + label_config[key] + "'")
                for key in label_config
            ]
            query = metric_name + "{" + ",".join(label_list) + "}"
        else:
            query = metric_name
        _LOGGER.debug("Prometheus Query: %s", query)

        while start < end:
            if start + chunk_seconds > end:
                chunk_seconds = end - start

            # using the query API to get raw data
            response = requests.get(
                "{0}/api/v1/query".format(self.url),
                params={
                    **{
                        "query": query + "[" + str(chunk_seconds) + "s" + "]",
                        "time": start + chunk_seconds,
                    },
                    **params,
                },
                verify=self.ssl_verification,
                headers=self.headers,
            )
            if response.status_code == 200:
                data += response.json()["data"]["result"]
            else:
                raise PrometheusApiClientException(
                    "HTTP Status Code {} ({})".format(response.status_code,
                                                      response.content))
            if store_locally:
                # store it locally
                self._store_metric_values_local(
                    metric_name,
                    json.dumps(response.json()["data"]["result"]),
                    start + chunk_seconds,
                )

            start += chunk_seconds
        return data
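A hedged usage sketch; the method resembles prometheus-api-client's PrometheusConnect, which is an assumption about the class it belongs to:

    from datetime import datetime, timedelta

    prom = PrometheusConnect(url='http://localhost:9090')  # hypothetical instance
    data = prom.get_metric_range_data(
        'up',
        start_time=datetime.now() - timedelta(hours=2),
        end_time=datetime.now(),
        chunk_size=timedelta(minutes=30),
    )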
Example #51
    def generate(self, departureTime:datetime.time, delay: datetime.timedelta, category, number, sequence):
        """
        :param departureTime:
        :param delay:
        :param category:
        :param number:
        :param sequence: list of station IDs
        :return:
        """

        orders, goodness = self._generate_stations(sequence)

        aux = []
        if self.display_hour is not None:
            flap_id = next((flap.flap_id for flap in self.display_hour.flaps.values() if flap.ids[0] == departureTime.hour), None)
            if flap_id is not None:
                aux.append(self.display_hour.flaps[flap_id])

        if self.display_minu is not None:
            flap_id = next((flap.flap_id for flap in self.display_minu.flaps.values() if flap.ids[0] == departureTime.minute), None)
            if flap_id is not None:
                aux.append(self.display_minu.flaps[flap_id])

        if self.display_dela is not None:
            minutes = delay.total_seconds() / 60

            if minutes > 0:
                best_flap = None
                best_diff = None
                for flap in self.display_dela.flaps.values():
                    if type(flap.ids[0]) is not int:
                        continue

                    diff = abs(minutes - int(flap.ids[0]))
                    if diff <= 30 and (best_flap is None or best_diff >= diff):
                        best_flap = flap
                        best_diff = diff

                        if diff == 0:
                            # perfect match
                            break

                if best_flap is not None:
                    aux.append(best_flap)

        #TODO: move to _generate_stations() and _goodness()
        if self.display_type is not None:
            best_flap = None
            best_rating = 0
            for flap in self.display_type.flaps.values():
                rating = 0
                requirements = True
                if category == flap.ids[0]:
                    rating += 1
                else:
                    requirements = False

                if number == str(flap.ids[2]):
                    rating += 4
                elif flap.ids[2] is None:
                    rating += 1
                else:
                    requirements = False

                if flap.ids[4] is not None and int(flap.ids[4]) in sequence:
                    rating += 1

                if flap.ids[5] is not None and int(flap.ids[5]) in sequence:
                    rating += 1

                if requirements and (best_flap is None or rating > best_rating):
                    best_flap = flap
                    best_rating = rating

            if best_flap is not None:
                aux.append(best_flap)
                print("Type rating: {} {}".format(best_rating, best_flap))

        return list(orders)+aux, goodness
Example #52
 def async_set_delayed_turn_off(self, time_period: timedelta):
     """Set delay off. The unit is different per device."""
     yield from self._try_command("Setting the delay off failed.",
                                  self._light.delay_off,
                                  time_period.total_seconds())
Example #53
 def pos(date: timedelta):
     return int(round(date.total_seconds() / frame_step))
Example #54
 def _set_timedelta(d: timedelta):
     return d.total_seconds()
Example #55
 async def async_set_delayed_turn_off(self, time_period: timedelta):
     """Set delayed turn off."""
     await self._try_command(
         "Setting the turn off delay failed.",
         self._light.delay_off, round(time_period.total_seconds() / 60))
Example #56
def wait(period: timedelta) -> None:
    time.sleep(period.total_seconds())
Example #57
    def __init__(self, max_size:int, latency:timedelta):
        self._max_size = max_size if max_size > 0 else 1
        self._latency = latency.total_seconds()

        super().__init__(list, latency / 2)
        self.last_updated = monotonic()
Example #58
 def total_milliseconds(expire_after: timedelta):
     return int(expire_after.total_seconds() * 1000.0)
Example #59
def _count_timedelta(delta: _datetime.timedelta, step: int, seconds_in_interval: int) -> int:
    """Helper function for iterate.  Finds the number of intervals in the timedelta."""
    return int(delta.total_seconds() / (seconds_in_interval * step))
Example #60
 def to_wire(self, value: timedelta) -> float:
     return value.total_seconds()