Example #1
    def acquire_lock(self, key, lease_time=3600, **kwargs) -> Lock:

        lockfile = os.path.join(self.basepath, f'rssalertbot-{key}.lock')

        # create our release callback
        def release():
            self.release_lock(key)

        try:
            self.locks[key] = zc.lockfile.LockFile(lockfile, content_template='{pid};{hostname}')
            log.debug(f"Acquired lock '{key}' on {lockfile}")

            return Lock(release, expires=pendulum.now().add(seconds=lease_time))

        except zc.lockfile.LockError:

            # we've already got a lockfile here, so we should check the age of it
            # - if it's older than the lease time, we can re-acquire, otherwise
            # nope!
            stats = os.stat(lockfile)
            locked_at = pendulum.from_timestamp(stats.st_mtime)
            age = pendulum.now() - locked_at
            if age.in_seconds() > lease_time:
                log.debug(f"Acquired lock '{key}' on {lockfile}")
                return Lock(release, expires=pendulum.now().add(seconds=lease_time))

            # no you can't have this lock
            log.debug(f"Lock '{key}' denied")
            raise LockNotAcquired()
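A minimal caller sketch for the file-lock example above. `FileLockBackend`, the `run_sync()` critical section, and `Lock.release()` delegating to the callback are assumptions for illustration, not part of the source:

import pendulum

backend = FileLockBackend(basepath='/tmp')  # hypothetical class holding acquire_lock()
try:
    lock = backend.acquire_lock('feed-sync', lease_time=600)
    try:
        run_sync()  # hypothetical critical section
    finally:
        lock.release()  # assumed to invoke the release callback passed to Lock()
except LockNotAcquired:
    pass  # another process holds the lock; retry on the next run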
Example #2
def do_maybe_record_timer(*event):
    if not dataview.timer_id:
        return
    item_id = dataview.timer_id
    job_id = dataview.timer_job
    hsh = DBITEM.get(doc_id=item_id)
    item_info = f"{hsh['itemtype']} {hsh['summary']}"

    now = pendulum.now()
    if dataview.timer_status == 1: #running
        time = dataview.timer_time + (now - dataview.timer_start)
    else:
        time = dataview.timer_time
    completed = pendulum.now()
    completed_str = format_datetime(completed)
    time_str = format_duration(time)

    def coroutine():
        dialog = ConfirmDialog("record time", f"item: {item_info}\nelapsed time: {time_str}\n\nrecord time and close timer?")
        record_close = yield From(show_dialog_as_float(dialog))
        if record_close:
            item.record_timer(item_id, job_id, completed, time)
            set_text(dataview.show_active_view())
            dataview.timer_clear()
            if item_id in dataview.itemcache:
                del dataview.itemcache[item_id]
            loop = get_event_loop()
            loop.call_later(0, data_changed, loop)

    ensure_future(coroutine())
Example #3
def do_maybe_cancel_timer(*event):
    if not dataview.timer_id:
        return
    item_id = dataview.timer_id
    job_id = dataview.timer_job
    hsh = DBITEM.get(doc_id=item_id)
    item_info = f"{hsh['itemtype']} {hsh['summary']}"

    stopped_timer = False
    now = pendulum.now()
    if dataview.timer_status == 1: #running
        time = dataview.timer_time + (now - dataview.timer_start)
    else:
        time = dataview.timer_time
    completed = pendulum.now()
    completed_str = format_datetime(completed)
    time_str = format_duration(time)

    def coroutine():
        dialog = ConfirmDialog("cancel timer", f"item: {item_info}\nelapsed time: {time_str}\n\nclose timer without recording?")
        record_cancel = yield From(show_dialog_as_float(dialog))
        if record_cancel:
            dataview.timer_clear()
            set_text(dataview.show_active_view())
            get_app().invalidate()

    ensure_future(coroutine())
Example #4
    def acquire_lock(self, key, owner_name: str='unknown', lease_time: int=3600) -> Lock:

        try:
            old = DynamoLock.get(key)
            # If the lock is not yet expired, and you aren't the owner, you can't have it
            if (pendulum.now('UTC') < old.expires) and old.owner_name != owner_name:
                log.debug(f"Lock {key} denied")
                raise LockAccessDenied()

            # delete the old lock
            old.delete()

        except DoesNotExist:
            pass

        # create the new lock
        rec = DynamoLock(
            key         = key,
            expires     = pendulum.now('UTC').add(seconds = lease_time),
            owner_name  = owner_name,
        )
        rec.save()
        log.debug(f"Lock {rec.key} acquired, expires {rec.expires}")

        def release():
            self.release_lock(key, owner_name)

        # return lock
        return Lock(release, expires=rec.expires)
Example #5
def test_diff_for_humans_absolute_years():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "1 year" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(years=1), True
        )
        assert "1 year" == pendulum.now().diff_for_humans(
            pendulum.now().add(years=1), True
        )
Example #6
def test_diff_for_humans_absolute_months():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "2 months" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(months=2), True
        )
        assert "2 months" == pendulum.now().diff_for_humans(
            pendulum.now().add(months=2), True
        )
Example #7
def test_diff_for_humans_absolute_hours():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "3 hours" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(hours=3), True
        )
        assert "3 hours" == pendulum.now().diff_for_humans(
            pendulum.now().add(hours=3), True
        )
Example #8
def test_diff_for_humans_absolute_minutes():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "30 minutes" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(minutes=30), True
        )
        assert "30 minutes" == pendulum.now().diff_for_humans(
            pendulum.now().add(minutes=30), True
        )
Example #9
def test_diff_for_humans_absolute_seconds():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "59 seconds" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(seconds=59), True
        )
        assert "59 seconds" == pendulum.now().diff_for_humans(
            pendulum.now().add(seconds=59), True
        )
Example #10
def test_diff_for_humans_absolute_hours():
    with pendulum.test(pendulum.today().at(12, 34, 56)):
        now = pendulum.now().time()

        assert now.diff_for_humans(now.subtract(hours=3), True) == "3 hours"
        now = pendulum.now().time()

        assert now.diff_for_humans(now.add(hours=3), True) == "3 hours"
Example #11
def test_diff_for_humans_other_and_month():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "4 weeks before" == pendulum.now().diff_for_humans(
            pendulum.now().add(weeks=4)
        )
        assert "1 month before" == pendulum.now().diff_for_humans(
            pendulum.now().add(months=1)
        )
Example #12
def test_diff_for_humans_other_and_future_month():
    with pendulum.test(pendulum.datetime(2012, 1, 1, 1, 2, 3)):
        assert "4 weeks after" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(weeks=4)
        )
        assert "1 month after" == pendulum.now().diff_for_humans(
            pendulum.now().subtract(months=1)
        )
Example #13
def test_diff_for_humans_absolute_minutes():
    with pendulum.test(pendulum.today().at(12, 34, 56)):
        now = pendulum.now().time()

        assert now.diff_for_humans(now.subtract(minutes=30), True) == "30 minutes"
        now = pendulum.now().time()

        assert now.diff_for_humans(now.add(minutes=30), True) == "30 minutes"
Example #14
def test_diff_for_humans_absolute_seconds():
    with pendulum.test(pendulum.today().at(12, 34, 56)):
        now = pendulum.now().time()

        assert now.diff_for_humans(now.subtract(seconds=59), True) == "59 seconds"
        now = pendulum.now().time()

        assert now.diff_for_humans(now.add(seconds=59), True) == "59 seconds"
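Examples #5 through #14 all follow one pattern: `pendulum.test()` (a pendulum 2.x context manager) freezes `pendulum.now()` at a fixed instant, and passing `True` as the second argument of `diff_for_humans()` asks for the absolute wording ("1 week" rather than "1 week ago"). A minimal self-contained sketch of the same pattern:

import pendulum

with pendulum.test(pendulum.datetime(2012, 1, 1)):
    now = pendulum.now()
    # absolute=True drops the "ago"/"in" phrasing
    assert now.diff_for_humans(now.subtract(days=7), True) == "1 week"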
Example #15
    def test_pointless(self):
        job = factories.IngestJobFactory(raw__datestamp=pendulum.now().subtract(hours=2))
        factories.IngestJobFactory(suid=job.suid, raw__datestamp=pendulum.now().subtract(hours=1))

        ingest(job_id=job.id)

        job.refresh_from_db()
        assert job.status == job.STATUS.skipped
        assert job.error_context == job.SkipReasons.pointless.value
Example #16
    def __call__(self, loop):
        start = pendulum.now()
        ps = yield from asyncio.create_subprocess_shell(
            self.cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            loop=loop)
        output = (yield from ps.communicate())[0]
        self.elapsed += pendulum.now() - start
        self.run_count += 1
        return ps, output
Example #17
    def __repr__(self):
        repr = """{simulation_name:=^30}

created:      {created_date}
started:      {started_date}
last:         {last_date}

time:         {t:g}
iteration:    {iter:g}

last step:    {step_time}
total time:   {running_time}


Physical parameters
-------------------
{parameters}

Hook function
-------------
{hook_source}

=========== Model ===========
{model_repr}"""
        repr = repr.format(
            simulation_name=" %s " % self.id,
            parameters="\n\t".join(
                ("%s:" % key).ljust(12) + pprint.pformat(value)
                for key, value in self.parameters.items()),
            t=self.t,
            iter=self.i,
            model_repr=self.model,
            hook_source=inspect.getsource(self._hook),
            step_time=(None if not self._last_running else
                       pendulum.now()
                       .subtract(seconds=self._last_running)
                       .diff()),
            running_time=(pendulum.now()
                          .subtract(seconds=self._total_running)
                          .diff()),
            created_date=self._created_timestamp.to_cookie_string(),
            started_date=(self._started_timestamp.to_cookie_string()
                          if self._started_timestamp else "None"),
            last_date=(self._last_timestamp.to_cookie_string()
                       if self._last_timestamp else "None"))
        return repr
Example #18
    def __repr__(self):
        repr = """last:   {last}
total:  {total}"""
        return repr.format(
            last=pendulum.now().subtract(seconds=self.last).diff(),
            total=pendulum.now().subtract(seconds=self.total).diff())
Example #19
    def __init__(self, model, fields, parameters, dt, t=0, tmax=None,
                 id=None, hook=null_hook,
                 scheme=schemes.RODASPR,
                 time_stepping=True, **kwargs):

        def intersection_kwargs(kwargs, function):
            """Inspect the function signature to identify the relevant keys
            in a dictionary of named parameters.
            """
            func_signature = inspect.signature(function)
            func_parameters = func_signature.parameters
            kwargs = {key: value
                      for key, value
                      in kwargs.items() if key in func_parameters}
            return kwargs
        kwargs["time_stepping"] = time_stepping
        self.id = str(uuid1())[:6] if not id else id
        self.model = model
        self.parameters = parameters
        self.fields = model.fields_template(**fields)
        self.t = t
        self.user_dt = self.dt = dt
        self.tmax = tmax
        self.i = 0
        self._stream = streamz.Stream()
        self._pprocesses = []

        self._scheme = scheme(model,
                              **intersection_kwargs(kwargs,
                                                    scheme.__init__))
        # compare the scheme *class*, not the instance built above
        if (time_stepping and
            scheme not in [schemes.RODASPR,
                           schemes.ROS3PRL,
                           schemes.ROS3PRw]):
            self._scheme = schemes.time_stepping(
                self._scheme,
                **intersection_kwargs(kwargs,
                                      schemes.time_stepping))
        self.status = 'created'

        self._total_running = 0
        self._last_running = 0
        self._created_timestamp = pendulum.now()
        self._started_timestamp = None
        self._last_timestamp = None
        self._actual_timestamp = pendulum.now()
        self._hook = hook
        self._container = None
        self._iterator = self.compute()
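`intersection_kwargs` above is a generic trick: filter a kwargs dict down to the parameter names a callable actually accepts before forwarding it. The same helper in isolation, with a made-up `scheme` function for illustration:

import inspect

def intersection_kwargs(kwargs, function):
    func_parameters = inspect.signature(function).parameters
    return {key: value for key, value in kwargs.items() if key in func_parameters}

def scheme(model, dt_min=1e-6, tol=1e-2):  # hypothetical target callable
    return dt_min, tol

opts = {"dt_min": 1e-4, "tol": 1e-1, "unrelated": True}
print(intersection_kwargs(opts, scheme))  # {'dt_min': 0.0001, 'tol': 0.1}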
Example #20
def test_is_birthday():
    with pendulum.test(pendulum.now()):
        d = pendulum.now()
        a_birthday = d.subtract(years=1)
        assert a_birthday.is_birthday()
        not_a_birthday = d.subtract(days=1)
        assert not not_a_birthday.is_birthday()
        also_not_a_birthday = d.add(days=2)
        assert not also_not_a_birthday.is_birthday()

    d1 = pendulum.datetime(1987, 4, 23)
    d2 = pendulum.datetime(2014, 9, 26)
    d3 = pendulum.datetime(2014, 4, 23)
    assert not d2.is_birthday(d1)
    assert d3.is_birthday(d1)
Example #21
def sign_in(mocker, user):
    secret = os.environ.get('JWT_SECRET', 'secret')
    claims = dict(sub=user.id, iat=pendulum.now(), exp=pendulum.tomorrow())
    mocker.patch.object(jwt, 'decode', return_value=claims)

    access_token = jwt.encode(claims, secret).decode("utf-8")
    return f'JWT {access_token}'
Example #22
    def holiday_cmd(self, mask, target, args):
        """
        Show the upcoming non-weekend holidays
        for all countries in config.

            %%holiday
        """
        country_codes = self.config.get("holiday_countries")

        if not country_codes:
            return

        now = pendulum.now(tz=self.config.get("default_timezone"))

        for country_code in country_codes.split(","):
            date, name = get_next_non_weekend_holiday(now, country_code)

            remaining_days = (
                pendulum.date(date.year, date.month, date.day)
                - now
            ).in_days()

            plural = "" if remaining_days == 1 else "s"

            yield (
                f"Next holiday for {country_code}:"
                f" {date.isoformat()} ({name}"
                f", in {remaining_days} day{plural})"
            )
Example #23
async def reset(request, token):
    try:
        reset = db.session.query(PasswordReset).filter_by(UUID=token).first()
        if not reset:
            return response.json({"error": "Invalid reset token"}, 404)

        if not reset.isValid:
            return response.json({"error": "Reset token has already been used"}, 404)

        if pendulum.now("UTC") > pendulum.instance(reset.expireTime):
            return response.json({"error": "Reset token has expired."}, 400)

        # Invalidate all resets for this user
        # db.session.query(PasswordReset).filter_by(userId=reset.userId).update(
        #     {"isValid": False}
        # )
        db.session.commit()

        user = utils.get_account_by_id(reset.userId)
        userData = user.serialize()
        userData["jwt"] = user.gen_token(expire_hours=1)
        userData["message"] = "Valid token provided. Prompt user to change password"

        return response.json(userData, 200)

    except Exception as e:
        return utils.exeption_handler(e, "Password reset confirmation failed", 500)
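The expiry check above relies on `pendulum.instance()` to lift a plain `datetime` (such as an ORM column value) into a pendulum `DateTime` so it can be compared with `pendulum.now("UTC")`. A small sketch; that the stored value is UTC is an assumption:

import datetime
import pendulum

raw = datetime.datetime(2030, 1, 1, 12, 0)      # naive datetime, e.g. from a DB row
expire_time = pendulum.instance(raw, tz='UTC')  # assume the column stores UTC
print(pendulum.now('UTC') > expire_time)        # False until 2030: token still valid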
Example #24
def elig(bot, trigger):
    """Cloak eligibility tool"""
    need_verified_email = True
    need_edit_count = 250
    need_months_registered = 3

    if not trigger.group(2):
        return bot.say('To be eligible for a cloak you need: ' + ['', 'a verified email, '][need_verified_email] + 'to be registered for ' + str(need_months_registered) + ' months,' + ' and have ' + str(need_edit_count) + ' edits.')

    query = trigger.group(3)

    r = requests.get('https://en.wikipedia.org/w/api.php?action=query&meta=globaluserinfo&guiuser='******'&guiprop=editcount&format=json')
    try:
        required_registration_time = pendulum.now().subtract(months=need_months_registered)
        actual_registration_time = pendulum.parse(str(r.json()['query']['globaluserinfo']['registration']))
    except KeyError:
        return bot.say('No such user (please note that usernames are case-sensitive).')
    if actual_registration_time > required_registration_time:
        return bot.say('User: "******" is ineligible for a cloak because they have not been registered onwiki for long enough (there may be other reasons why this user is ineligible).')

    actual_edit_count = r.json()['query']['globaluserinfo']['editcount']
    if actual_edit_count < need_edit_count:
        return bot.say('User: "******" is ineligible for a cloak because their edit count is not high enough. They need ' + str(need_edit_count - actual_edit_count) + ' more edits to be eligible under this criterion (there may be other reasons why this user is ineligible).')

    r2 = requests.get('https://en.wikipedia.org/w/api.php?action=query&list=users&ususers=' + query + '&usprop=emailable&format=json')
    try:
        r2.json()['query']['users'][0]['emailable']
    except KeyError:
        return bot.say('User: "******" is ineligible for a cloak because they do not have a verified email address (this problem may be caused be the user disabling Special:EmailUser in their English Wikipedia preferences).')

    bot.say('User: "******" is eligible for a cloak. They registered onwiki about ' + actual_registration_time.diff_for_humans() + ', have ' + str(actual_edit_count) + ' edits and have a verified email address.')
Example #25
    def compute(self):
        """Generator which yield the actual state of the system every dt.

        Yields
        ------
        tuple : t, fields
            Actual time and updated fields container.
        """
        fields = self.fields
        t = self.t
        pars = self.parameters
        self._started_timestamp = pendulum.now()
        self.stream.emit(self)

        try:
            while True:
                t, fields, pars = self._compute_one_step(t, fields, pars)

                self.i += 1
                self.t = t
                self.fields = fields
                self.parameters = pars
                for pprocess in self.post_processes:
                    pprocess.function(self)
                self.stream.emit(self)
                yield self.t, self.fields

                if self.tmax and (isclose(self.t, self.tmax)):
                    self._end_simulation()
                    return

        except RuntimeError:
            self.status = 'failed'
            raise
Example #26
    def user(id, email_address, supplier_code, supplier_name, name, is_token_valid=True, locked=False, active=True,
             role='buyer', terms_accepted_at=None):

        now = pendulum.now('UTC')

        hours_offset = -1 if is_token_valid else 1
        password_changed_date = now + timedelta(hours=hours_offset)

        if terms_accepted_at is None:
            terms_accepted_at = now

        user = {
            "id": id,
            "emailAddress": email_address,
            "name": name,
            "role": role,
            "locked": locked,
            'active': active,
            'passwordChangedAt': password_changed_date.to_iso8601_string(),
            'termsAcceptedAt': terms_accepted_at.to_iso8601_string()
        }

        if supplier_code:
            supplier = {
                "supplierCode": supplier_code,
                "name": supplier_name,
            }
            user['role'] = 'supplier'
            user['supplier'] = supplier
        return {
            "users": user
        }
Example #27
    def diff(self, dt=None, abs=True):
        """
        Returns the difference between two Time objects as an Duration.

        :type dt: Time or None

        :param abs: Whether to return an absolute interval or not
        :type abs: bool

        :rtype: Duration
        """
        if dt is None:
            dt = pendulum.now().time()
        else:
            dt = self.__class__(dt.hour, dt.minute, dt.second, dt.microsecond)

        us1 = (
            self.hour * SECS_PER_HOUR + self.minute * SECS_PER_MIN + self.second
        ) * USECS_PER_SEC

        us2 = (
            dt.hour * SECS_PER_HOUR + dt.minute * SECS_PER_MIN + dt.second
        ) * USECS_PER_SEC

        klass = Duration
        if abs:
            klass = AbsoluteDuration

        return klass(microseconds=us2 - us1)
Example #28
def get_domain_metrics():
    metrics = {}

    query = '''
                            SELECT name, count(status), status::TEXT
                            FROM
                              supplier_domain INNER JOIN domain ON supplier_domain.domain_id = domain.id
                              WHERE status = 'assessed' OR status = 'unassessed'
                            GROUP BY name, status::TEXT
                            UNION
                            SELECT key, count(*),
                            (CASE
                                WHEN application.status = 'submitted' THEN 'submitted'
                                WHEN application.status = 'saved' THEN 'unsubmitted'
                            END) status
                            FROM
                              application, json_each(application.data->'services') badge
                            WHERE "value"::TEXT = 'true'
                            AND (application.status = 'saved' OR application.status = 'submitted')
                            AND (application.type = 'new' OR application.type = 'upgrade')
                            GROUP BY key, status
                            '''
    for (domain, count, status) in db.session.execute(query).fetchall():
        if domain not in metrics:
            metrics[domain] = {}
            metrics[domain]['domain'] = domain
            metrics[domain]['timestamp'] = pendulum.now().to_iso8601_string()
        metrics[domain][status] = count

    metrics = list(metrics.values())
    return jsonify(metrics)
Example #29
    def validate_closed_at(self):
        if 'closedAt' not in self.data or not self.data['closedAt']:
            return False
        parsed = pendulum.parse(self.data['closedAt']).in_timezone('Australia/Canberra').start_of('day')
        if parsed < pendulum.now('Australia/Canberra').add(days=2).start_of('day'):
            return False
        return True
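The validator above requires `closedAt` to fall at least two full days ahead in Canberra time; comparing `start_of('day')` on both sides makes the rule date-based rather than time-of-day-based. A quick standalone check of the same boundary:

import pendulum

tz = 'Australia/Canberra'
cutoff = pendulum.now(tz).add(days=2).start_of('day')
closed_at = pendulum.parse('2099-01-01', tz=tz).start_of('day')
print(closed_at >= cutoff)  # True: a date two or more days out passes the check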
Example #30
    def test_read_with_empty_metadata(self):
        ts = pendulum.now()
        logs, metadatas = self.es_task_handler.read(self.ti, 1, {})
        self.assertEqual(1, len(logs))
        self.assertEqual(len(logs), len(metadatas))
        self.assertEqual(self.test_message, logs[0])
        self.assertFalse(metadatas[0]['end_of_log'])
        # offset should be initialized to 0 if not provided.
        self.assertEqual(1, metadatas[0]['offset'])
        # last_log_timestamp will be initialized using log reading time
        # if not last_log_timestamp is provided.
        self.assertTrue(timezone.parse(metadatas[0]['last_log_timestamp']) > ts)

        # case where offset is missing but metadata not empty.
        self.es.delete(index=self.index_name, doc_type=self.doc_type, id=1)
        logs, metadatas = self.es_task_handler.read(self.ti, 1, {'end_of_log': False})
        self.assertEqual(1, len(logs))
        self.assertEqual(len(logs), len(metadatas))
        self.assertEqual([''], logs)
        self.assertFalse(metadatas[0]['end_of_log'])
        # offset should be initialized to 0 if not provided.
        self.assertEqual(0, metadatas[0]['offset'])
        # last_log_timestamp will be initialized using log reading time
        # if not last_log_timestamp is provided.
        self.assertTrue(timezone.parse(metadatas[0]['last_log_timestamp']) > ts)
Example #31
def test_sensor_tick_range(graphql_context):
    external_repository = graphql_context.get_repository_location(
        main_repo_location_name()).get_repository(main_repo_name())
    graphql_context.instance.reconcile_scheduler_state(external_repository)

    sensor_name = "always_no_config_sensor"
    external_sensor = external_repository.get_external_sensor(sensor_name)
    sensor_selector = infer_sensor_selector(graphql_context, sensor_name)

    # test with no job state
    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": None,
            "dayOffset": None
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 0

    # turn the sensor on
    graphql_context.instance.add_job_state(
        JobState(external_sensor.get_external_origin(), JobType.SENSOR,
                 JobStatus.RUNNING))

    now = pendulum.now("US/Central")
    one = now.subtract(days=2).subtract(hours=1)
    with pendulum.test(one):
        _create_tick(graphql_context.instance)

    two = now.subtract(days=1).subtract(hours=1)
    with pendulum.test(two):
        _create_tick(graphql_context.instance)

    three = now.subtract(hours=1)
    with pendulum.test(three):
        _create_tick(graphql_context.instance)

    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": None,
            "dayOffset": None
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 3

    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": 1,
            "dayOffset": None
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0][
        "timestamp"] == three.timestamp()

    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": 1,
            "dayOffset": 1
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 1
    assert result.data["sensorOrError"]["sensorState"]["ticks"][0][
        "timestamp"] == two.timestamp()

    result = execute_dagster_graphql(
        graphql_context,
        GET_SENSOR_TICK_RANGE_QUERY,
        variables={
            "sensorSelector": sensor_selector,
            "dayRange": 2,
            "dayOffset": None,
        },
    )
    assert len(result.data["sensorOrError"]["sensorState"]["ticks"]) == 2
Example #32
def add_log(msg):
    line = "{} - {} \n".format(pendulum.now('Asia/Shanghai').to_datetime_string(), msg)
    with open("./amac_log.txt", "a") as f:
        f.write(line)
Example #33
    def generate_worksheet(self, workbook, stats_type):
        formatters = self.setup_workbook_formatters(workbook)

        if stats_type == 'aggregated':
            write = self.write_total_stats
            create_worksheet = self.create_totals_worksheet
        elif stats_type == 'location_segmentation':
            write = self.write_location_segmentation_stats
            create_worksheet = self.create_location_segmentation_worksheet
        elif stats_type == 'theme_segmentation':
            write = self.write_theme_segmentation_stats
            create_worksheet = self.create_theme_segmentation_worksheet
        elif stats_type == 'impact_survey':
            write = self.write_impact_survey_stats
            create_worksheet = self.create_impact_survey_worksheet

        for year in range(self.start_date.year, self.end_date.year + 1):
            statistics_start_date = pendulum.create(year, 1, 1, 0, 0, 0)
            statistics_end_date = pendulum.create(year, 12, 31, 23, 59, 59)

            # Generate data by year
            logger.info('tenant:{} Yearly ({}): end_date:{}'.format(
                self.tenant, stats_type,
                statistics_end_date.to_iso8601_string()))

            # Worksheet for Totals by Year
            worksheet = create_worksheet(workbook, year)

            # Add label
            row = 1
            worksheet.write(row, 0, 'By Year', formatters['metrics_header'])

            row += 1
            write(worksheet=worksheet,
                  row=row,
                  statistic_type='yearly',
                  start_date=statistics_start_date,
                  end_date=statistics_end_date)

            # Generate data by month
            row += 1
            worksheet.write(row, 0, 'By Month', formatters['metrics_header'])

            row += 1
            self.monthly_statistics_row_start = row
            for month in range(1, 13):
                statistics_end_date = pendulum.create(year, month,
                                                      1).end_of('month')

                # Stop if the end date is in the next month from current date
                if statistics_end_date < pendulum.now().add(months=1):
                    logger.info('tenant:{} Monthly ({}): end_date:{}'.format(
                        self.tenant, stats_type,
                        statistics_end_date.to_iso8601_string()))
                    write(worksheet=worksheet,
                          row=row,
                          statistic_type='monthly',
                          start_date=statistics_start_date,
                          end_date=statistics_end_date)
                    row += 1
                    self.monthly_statistics_row_end = row

            # Generate data by week
            worksheet.write(row, 0, 'By Week', formatters['metrics_header'])

            time_period = pendulum.period(statistics_start_date,
                                          pendulum.create(year, 12, 31))
            row += 1
            self.weekly_statistics_row_start = row
            for period in time_period.range('weeks'):

                # Curtail the last week of the year to end with the end of the year
                # E.g. The end day of the last week of the year could lie in the next year, in this case we just
                # use the last day of the year as the end day of the week
                statistics_end_date = period.end_of('week') \
                    if period.end_of('week') < statistics_start_date.end_of('year') \
                    else statistics_start_date.end_of('year')

                if statistics_end_date <= pendulum.now().add(weeks=1):
                    logger.info('tenant:{} Weekly ({}): end_date:{}'.format(
                        self.tenant, stats_type,
                        statistics_end_date.to_iso8601_string()))
                    write(worksheet=worksheet,
                          row=row,
                          statistic_type='weekly',
                          start_date=statistics_start_date,
                          end_date=statistics_end_date)

                    row += 1
                    self.weekly_statistics_row_end = row - 1

        if stats_type == 'aggregated':
            self.create_monthly_charts(workbook, self.charts)
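`pendulum.create(...)` in this example is the pendulum 1.x constructor; under pendulum 2.x the equivalent calls would look like the sketch below (the year is illustrative):

import pendulum

year = 2020  # illustrative
statistics_start_date = pendulum.datetime(year, 1, 1, 0, 0, 0)    # was pendulum.create(...)
statistics_end_date = pendulum.datetime(year, 12, 31, 23, 59, 59)
time_period = pendulum.period(statistics_start_date, pendulum.datetime(year, 12, 31))
for week in time_period.range('weeks'):
    pass  # one DateTime per week across the year, as used for the weekly stats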
Example #34
def sync_table(connection, catalog_entry, state, columns):
    # If there is an existing bookmark, use it; otherwise, use replication_key
    replication_key_value = None
    if not state.get("bookmarks", {}).get(catalog_entry.tap_stream_id):
        singer.write_bookmark(state, catalog_entry.tap_stream_id,
                              catalog_entry.replication_key, None)
    else:
        # Start with the bookmark.
        replication_key_value = singer.get_bookmark(
            state, catalog_entry.tap_stream_id, catalog_entry.replication_key)

    # with connection.cursor() as cursor:
    cursor = connection.cursor()

    # Build the sql for this stream.
    select_sql = generate_select_sql(catalog_entry, columns)

    # If bookmark exists, modify the query.
    if replication_key_value is not None:
        if (catalog_entry.schema.properties[
                catalog_entry.replication_key].format == "date-time"):
            replication_key_value = pendulum.parse(replication_key_value)
        # Define the upper bound unconditionally so non-datetime keys don't
        # hit a NameError; lag it 20 minutes to skip rows still being written.
        replication_key_max = pendulum.now("UTC").subtract(minutes=20)

        select_sql += " WHERE {0} >= '{1}' AND {0} < '{2}' ORDER BY {3} ASC".format(
            catalog_entry.replication_key,
            replication_key_value,
            replication_key_max,
            catalog_entry.replication_key,
        )

    elif catalog_entry.replication_key is not None:
        select_sql += " ORDER BY {} ASC".format(catalog_entry.replication_key)

    # time to sync.
    LOGGER.info("Running %s", select_sql)
    cursor.execute(select_sql)

    row = cursor.fetchone()
    rows_saved = 0

    with metrics.record_counter(catalog_entry.tap_stream_id) as counter:
        counter.tags["table"] = catalog_entry.table
        while row:
            counter.increment()
            rows_saved += 1

            # format record
            rec = process_row(row, columns)

            # resolve against metadata
            with Transformer() as transformer:
                rec = transformer.transform(
                    rec,
                    catalog_entry.schema.to_dict(),
                    metadata.to_map(catalog_entry.metadata),
                )

            # write to singer.
            singer.write_record(catalog_entry.tap_stream_id, rec)

            # Perhaps the more modern way of managing state.
            if catalog_entry.replication_method == "INCREMENTAL":
                singer.write_bookmark(
                    state,
                    catalog_entry.tap_stream_id,
                    catalog_entry.replication_key,
                    rec[catalog_entry.replication_key],
                )

            if rows_saved % 100 == 0:
                singer.write_state(state)

            row = cursor.fetchone()

        singer.write_state(state)

    return counter.value
Example #35
async def amain(
    backend_address="ws://*****:*****@laptop",
    bob_device_id="bob@laptop",
    other_device_name="pc",
    alice_workspace="alicews",
    bob_workspace="bobws",
    password="******",
    administrator_token=DEFAULT_ADMINISTRATOR_TOKEN,
    force=False,
):

    configure_logging("WARNING")

    config_dir = get_default_config_dir(os.environ)
    organization_id = OrganizationID(organization_id)
    backend_address = BackendAddr(backend_address)
    alice_device_id = DeviceID(alice_device_id)
    bob_device_id = DeviceID(bob_device_id)
    alice_slugid = f"{organization_id}:{alice_device_id}"
    bob_slugid = f"{organization_id}:{bob_device_id}"

    # Create organization

    async with backend_administrator_cmds_factory(backend_address, administrator_token) as cmds:

        bootstrap_token = await cmds.organization_create(organization_id)

        organization_bootstrap_addr = BackendOrganizationBootstrapAddr.build(
            backend_address, organization_id, bootstrap_token
        )

    # Bootstrap organization and Alice user

    async with backend_anonymous_cmds_factory(organization_bootstrap_addr) as cmds:
        root_signing_key = SigningKey.generate()
        root_verify_key = root_signing_key.verify_key
        organization_addr = organization_bootstrap_addr.generate_organization_addr(root_verify_key)

        alice_device = generate_new_device(alice_device_id, organization_addr)

        save_device_with_password(config_dir, alice_device, password, force=force)

        now = pendulum.now()
        certified_user = certify_user(
            None, root_signing_key, alice_device.user_id, alice_device.public_key, now
        )
        certified_device = certify_device(
            None, root_signing_key, alice_device_id, alice_device.verify_key, now
        )

        await cmds.organization_bootstrap(
            organization_bootstrap_addr.organization_id,
            organization_bootstrap_addr.bootstrap_token,
            root_verify_key,
            certified_user,
            certified_device,
        )

    # Create a workspace for Alice

    config = load_config(config_dir, debug="DEBUG" in os.environ)
    async with logged_core_factory(config, alice_device) as core:
        await core.fs.workspace_create(f"/{alice_workspace}")

    # Register a new device for Alice

    token = generate_invitation_token()
    other_alice_device_id = DeviceID("@".join((alice_device.user_id, other_device_name)))
    other_alice_slugid = f"{organization_id}:{other_alice_device_id}"

    async def invite_task():
        async with backend_cmds_factory(
            alice_device.organization_addr, alice_device.device_id, alice_device.signing_key
        ) as cmds:
            await invite_and_create_device(alice_device, cmds, other_device_name, token)

    async def claim_task():
        async with backend_anonymous_cmds_factory(alice_device.organization_addr) as cmds:
            other_alice_device = await retry(claim_device, cmds, other_alice_device_id, token)
            save_device_with_password(config_dir, other_alice_device, password, force=force)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(invite_task)
        nursery.start_soon(claim_task)

    # Invite Bob in

    token = generate_invitation_token()

    async def invite_task():
        async with backend_cmds_factory(
            alice_device.organization_addr, alice_device.device_id, alice_device.signing_key
        ) as cmds:
            await invite_and_create_user(
                alice_device, cmds, bob_device_id.user_id, token, is_admin=True
            )

    async def claim_task():
        async with backend_anonymous_cmds_factory(alice_device.organization_addr) as cmds:
            bob_device = await retry(claim_user, cmds, bob_device_id, token)
            save_device_with_password(config_dir, bob_device, password, force=force)

    async with trio.open_nursery() as nursery:
        nursery.start_soon(invite_task)
        nursery.start_soon(claim_task)

    # Create bob workspace and share with Alice

    bob_device = load_device_with_password(
        config.config_dir, organization_id, bob_device_id, password
    )

    async with logged_core_factory(config, bob_device) as core:
        await core.fs.workspace_create(f"/{bob_workspace}")
        await core.fs.share(f"/{bob_workspace}", alice_device_id.user_id)

    # Share Alice workspace with bob

    async with logged_core_factory(config, alice_device) as core:
        await core.fs.share(f"/{alice_workspace}", bob_device_id.user_id)

    # Print out

    click.echo(
        f"""
Mount alice and bob drives using:

    $ parsec core run -P {password} -D {alice_slugid}
    $ parsec core run -P {password} -D {other_alice_slugid}
    $ parsec core run -P {password} -D {bob_slugid}
"""
    )
Example #36
    async def api_realm_update_roles(self, client_ctx, msg):
        msg = realm_update_roles_serializer.req_load(msg)

        try:
            data = RealmRoleCertificateContent.verify_and_load(
                msg["role_certificate"],
                author_verify_key=client_ctx.verify_key,
                expected_author=client_ctx.device_id,
            )

        except DataError as exc:
            return {
                "status": "invalid_certification",
                "reason": f"Invalid certification data ({exc}).",
            }

        now = pendulum.now()
        if not timestamps_in_the_ballpark(data.timestamp, now):
            return {
                "status": "invalid_certification",
                "reason": "Invalid timestamp in certification.",
            }

        granted_role = RealmGrantedRole(
            certificate=msg["role_certificate"],
            realm_id=data.realm_id,
            user_id=data.user_id,
            role=data.role,
            granted_by=data.author,
            granted_on=data.timestamp,
        )
        if granted_role.granted_by.user_id == granted_role.user_id:
            return {
                "status": "invalid_data",
                "reason": "Realm role certificate cannot be self-signed.",
            }

        try:
            await self.update_roles(client_ctx.organization_id, granted_role,
                                    msg["recipient_message"])

        except RealmRoleAlreadyGranted:
            return realm_update_roles_serializer.rep_dump(
                {"status": "already_granted"})

        except RealmAccessError:
            return realm_update_roles_serializer.rep_dump(
                {"status": "not_allowed"})

        except RealmIncompatibleProfileError as exc:
            return realm_update_roles_serializer.rep_dump({
                "status": "incompatible_profile",
                "reason": str(exc)
            })

        except RealmNotFoundError as exc:
            return realm_update_roles_serializer.rep_dump({
                "status": "not_found",
                "reason": str(exc)
            })

        except RealmInMaintenanceError:
            return realm_update_roles_serializer.rep_dump(
                {"status": "in_maintenance"})

        return realm_update_roles_serializer.rep_dump({"status": "ok"})
Example #37
def get_last_months(months):
    return pendulum.now(tz).subtract(months=months).start_of('month'), pendulum.now(tz)
Example #38
    async def api_realm_create(self, client_ctx, msg):
        if client_ctx.profile == UserProfile.OUTSIDER:
            return {
                "status": "not_allowed",
                "reason": "Outsider user cannot create realm"
            }

        msg = realm_create_serializer.req_load(msg)

        try:
            data = RealmRoleCertificateContent.verify_and_load(
                msg["role_certificate"],
                author_verify_key=client_ctx.verify_key,
                expected_author=client_ctx.device_id,
            )

        except DataError as exc:
            return {
                "status": "invalid_certification",
                "reason": f"Invalid certification data ({exc}).",
            }

        now = pendulum.now()
        if not timestamps_in_the_ballpark(data.timestamp, now):
            return {
                "status": "invalid_certification",
                "reason": "Invalid timestamp in certification.",
            }

        granted_role = RealmGrantedRole(
            certificate=msg["role_certificate"],
            realm_id=data.realm_id,
            user_id=data.user_id,
            role=data.role,
            granted_by=data.author,
            granted_on=data.timestamp,
        )
        if granted_role.granted_by.user_id != granted_role.user_id:
            return {
                "status": "invalid_data",
                "reason":
                "Initial realm role certificate must be self-signed.",
            }
        if granted_role.role != RealmRole.OWNER:
            return {
                "status": "invalid_data",
                "reason":
                "Initial realm role certificate must set OWNER role.",
            }

        try:
            await self.create(client_ctx.organization_id, granted_role)

        except RealmNotFoundError as exc:
            return realm_create_serializer.rep_dump({
                "status": "not_found",
                "reason": str(exc)
            })

        except RealmAlreadyExistsError:
            return realm_create_serializer.rep_dump(
                {"status": "already_exists"})

        return realm_create_serializer.rep_dump({"status": "ok"})
Example #39
    def _filter(self, data, strategies, wxcampaign: CampaignHelper):
        # today = pendulum.today().to_datetime_string()
        # yesterday = pendulum.yesterday().to_datetime_string()
        # yesterday_count = Point.where('created_at', '<', today)\
        #     .where('created_at', '>', yesterday)\
        #     .where('campaign_id', data['campaign_id'])\
        #     .where('status', ADSTATUS_NORMAL)\
        #     .order_by('id', 'desc')\
        #     .first()
        #
        # before_yesterday_count = Point.where('created_at', '<', yesterday)\
        #     .where('campaign_id', data['campaign_id']) \
        #     .where('status', ADSTATUS_NORMAL)\
        #     .order_by('id', 'desc')\
        #     .first()
        today = pendulum.today()
        yesterday = pendulum.yesterday()
        now = pendulum.now()
        actions = []
        delivery_start_time, delivery_end_time = wxcampaign.delivery_time()
        cur_hour = now.hour

        may_missing_key = [
            '1day_action_step', '1day_action_reversion',
            '1day_action_complete_order', '1day_action_complete_order_amount'
        ]
        for key in may_missing_key:
            if key not in data:
                data[key] = 0

        if delivery_start_time >= today:
            '''
            Between 1 and 2 am: a "new" campaign is one created within the
            last day. If spend per click > 10, or today's spend > 190 with
            no completed orders, move the delivery end time to 6 am.
            '''
            if (int(data['click_count']) == 0 or int(data['total_cost']) / 100 / int(data['click_count']) > 10) or \
                    (data['total_cost'] > 19000 and data['1day_action_complete_order'] == 0):
                actions.append(build_command(data, 'timeset_end', 6))
        elif yesterday <= delivery_start_time < today:
            '''
            For second-newest campaigns (created the day before), the same
            conditions trigger an outright suspend.
            '''
            if (int(data['click_count']) == 0 or int(data['total_cost']) / 100 / int(data['click_count']) > 10) or \
                    (data['total_cost'] > 19000 and data['1day_action_complete_order'] == 0):
                actions.append(build_command(data, 'suspend', None))
        else:
            statistic = Point.select(Point.raw('max(1day_action_complete_order) as order_num, '
                                               'max(1day_action_complete_order_amount) as order_amount'))\
                .where('created_at', '>=', today)\
                .where('campaign_id', data['campaign_id'])\
                .where('status', ADSTATUS_NORMAL)\
                .first()
            order_amount = max(statistic.order_amount,
                               data['1day_action_complete_order_amount'], 0)
            order_num = max(statistic.order_num,
                            data['1day_action_complete_order'], 0)
            data['sy_cost'] = int(data['sy_cost'])
            roi = 0 if order_amount == 0 else data['sy_cost'] / order_amount
            if wxcampaign.is_beishang():
                '''
                Between 1 and 2 am, for campaigns targeting only Beijing and
                Shanghai: if ROI is below 1.5 and there are fewer than 2
                orders, set the end time to 7 am; if ROI is below 1.5 with
                2 or more orders, set it to 10 am.
                '''
                if order_num < 2 and roi < 1.5:
                    actions.append(build_command(data, 'timeset_end', 7))
                elif order_num > 4 and roi < 1.5:
                    actions.append(build_command(data, 'timeset_end', 10))
            else:
                '''
                Between 1 and 2 am, for campaigns not targeting Beijing or
                Shanghai: if today's spend > 200 and historical conversions
                are zero, suspend; if spend > 200 and total historical spend
                over total conversions >= 1.5, set the end time to 9 am;
                if that ratio is < 1.5, set it to 7 am.
                '''
                records = Point.select(Point.raw('max(sy_cost) as day_cost, '
                                                 'max(1day_action_complete_order) as order_num, '
                                                 'max(1day_action_complete_order_amount) as order_amount'))\
                    .where('created_at', '>=', today.to_datetime_string()) \
                    .where('campaign_id', data['campaign_id']) \
                    .group_by(Point.raw('DATE_FORMAT(update_time, "%%Y%%m%%d")')) \
                    .get()
                total_cost = 0
                total_order = 0
                total_order_amount = 0
                for record in records:
                    total_cost += int(record.day_cost)
                    total_order += record.order_num
                    total_order_amount += record.order_amount
                transfer_rate = 0 if total_order == 0 else total_cost / total_order
                if data['sy_cost'] > 200:
                    if total_order == 0:
                        actions.append(build_command(data, 'suspend', None))
                    elif transfer_rate >= 1.5:
                        actions.append(build_command(data, 'timeset_end', 9))
                    else:
                        actions.append(build_command(data, 'timeset_end', 7))
        return actions
Example #40
    def current():
        return (Service
                .objects(date__lte=pendulum.now(TZ), master=True)
                .order_by('-date')
                .first())
Example #41
    def test_unexpired_cache(self):
        state = Cached(cached_result_expiration=pendulum.now("utc") + timedelta(days=1))
        assert duration_only(state, None, None) is True
Example #42
def test_expired_cache_stateful(validator):
    state = Cached(cached_result_expiration=pendulum.now("utc") - timedelta(days=1))
    assert validator()(state, None, None) is False
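These two tests mix pendulum with the standard library's `timedelta`; that works because pendulum's `DateTime` subclasses `datetime.datetime`, so arithmetic with a `timedelta` yields another pendulum `DateTime`. For instance:

import pendulum
from datetime import timedelta

expiration = pendulum.now('utc') + timedelta(days=1)
print(type(expiration))  # <class 'pendulum.datetime.DateTime'>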
Example #43
def top(repo_ctx, sort, rows):
    repos = []
    columns = OrderedDict([
        ("html_url", "URL"),
        ("stargazers_count", "Stars"),
        ("forks_count", "Forks"),
        ("open_issues_count", "Open Issues"),
        ("updated_at", "Last update"),
        ("pushed_at", "Pushed At"),
    ], )
    headers = list(columns.values())

    spinner = Halo(text="Fetch information about forks", spinner="dots")
    spinner.start()

    if sort in {"branches", "commits", "watchers"}:
        columns[sort] = sort.capitalize()
        headers.append(columns[sort])
        Repo = namedtuple("Repo", list(columns.keys()))
    else:
        Repo = namedtuple("Repo", list(columns.keys()))

    for fork in repo_ctx.forks:
        def_prop = [
            fork.html_url,
            fork.stargazers_count,
            fork.forks_count,
            fork.open_issues_count,
            fork.updated_at,
            fork.pushed_at,
        ]
        # github api may return nonexistent profile
        if sort == "branches":
            try:
                def_prop.append(len(list(fork.branches())))
                repos.append(Repo(*def_prop))
            except github3.exceptions.NotFoundError:
                click.echo("\nRepository {0} not found".format(fork.html_url))
        elif sort == "watchers":
            try:
                repo = repo_ctx.gh.repository(fork.owner.login, fork.name)
                def_prop.append(repo.subscribers_count)
                repos.append(Repo(*def_prop))
            except github3.exceptions.NotFoundError:
                click.echo("\nRepository {0} not found".format(fork.html_url))
        elif sort == "commits":
            try:
                def_prop.append(
                    sum((c.contributions_count for c in fork.contributors())))
                repos.append(Repo(*def_prop))
            except github3.exceptions.NotFoundError:
                click.echo("\nRepository {0} not found".format(fork.html_url))
        else:
            repos.append(Repo(*def_prop))

    sorted_forks = sorted(repos, key=attrgetter(sort), reverse=True)
    humanize_dates_forks = []
    for fork in sorted_forks[:rows]:
        days_passed_updated_at = (pendulum.now() -
                                  pendulum.parse(fork.updated_at)).days
        days_passed_pushed_at = (pendulum.now() -
                                 pendulum.parse(fork.pushed_at)).days
        human_updated_at = pendulum.now().subtract(
            days=days_passed_updated_at).diff_for_humans()
        human_pushed_at = pendulum.now().subtract(
            days=days_passed_pushed_at).diff_for_humans()
        humanize_dates_forks.append(
            fork._replace(updated_at=human_updated_at,
                          pushed_at=human_pushed_at))

    spinner.stop()
    click.echo(tabulate(humanize_dates_forks, headers=headers,
                        tablefmt="grid"))
Example #44
    def initialize_run(  # type: ignore
        self,
        state: Optional[State],
        task_states: Dict[Task, State],
        context: Dict[str, Any],
        task_contexts: Dict[Task, Dict[str, Any]],
        parameters: Dict[str, Any],
    ) -> FlowRunnerInitializeResult:
        """
        Initializes the Task run by initializing state and context appropriately.

        If the provided state is a Submitted state, the state it wraps is extracted.

        Args:
            - state (Optional[State]): the initial state of the run
            - task_states (Dict[Task, State]): a dictionary of any initial task states
            - context (Dict[str, Any], optional): prefect.Context to use for execution
                to use for each Task run
            - task_contexts (Dict[Task, Dict[str, Any]], optional): contexts that will be
                provided to each task
            - parameters(dict): the parameter values for the run

        Returns:
            - NamedTuple: a tuple of initialized objects:
                `(state, task_states, context, task_contexts)`
        """

        # overwrite context parameters one-by-one
        context_params = context.setdefault("parameters", {})
        for p in self.flow.parameters():
            if not p.required:
                context_params.setdefault(p.name, p.default)
        for param, value in (parameters or {}).items():
            context_params[param] = value

        context.update(flow_name=self.flow.name)
        context.setdefault("scheduled_start_time", pendulum.now("utc"))

        # Determine the current time, allowing our formatted dates in the context
        # to rely on a manually set value
        now = context.get("date")
        if isinstance(now, str):
            # Attempt to parse into a `DateTime` object since it will often be passed
            # as a serialized string from the UI we'll override the context on a
            # successful parse so the type is consistent for users
            try:
                now = pendulum.parse(now)
                context["date"] = now
            except Exception:
                pass
        if not isinstance(now, pendulum.DateTime):
            if now is not None:
                self.logger.warning(
                    "`date` was set in the context manually but could not be parsed "
                    "into a pendulum `DateTime` object. Additional context variables "
                    "that rely on the current date i.e `today` and `tomorrow` will be "
                    "based on the current time instead of the `date` context variable."
                )
            now = pendulum.now("utc")

        # add various formatted dates to context
        dates = {
            "date": now,
            "today": now.strftime("%Y-%m-%d"),
            "yesterday": now.add(days=-1).strftime("%Y-%m-%d"),
            "tomorrow": now.add(days=1).strftime("%Y-%m-%d"),
            "today_nodash": now.strftime("%Y%m%d"),
            "yesterday_nodash": now.add(days=-1).strftime("%Y%m%d"),
            "tomorrow_nodash": now.add(days=1).strftime("%Y%m%d"),
        }
        for key, val in dates.items():
            context.setdefault(key, val)

        for task in self.flow.tasks:
            task_contexts.setdefault(task, {}).update(
                task_name=task.name, task_slug=self.flow.slugs[task]
            )

        state, context = super().initialize_run(state=state, context=context)
        return FlowRunnerInitializeResult(
            state=state,
            task_states=task_states,
            context=context,
            task_contexts=task_contexts,
        )
Example #45
    def device_fitness_report(self, queue, worker_group, device):
        results = self.get_worker_jobs(queue, worker_group, device)
        task_successes = 0
        task_failures = 0
        task_runnings = 0
        task_exceptions = 0
        task_last_started_timestamp = None

        task_ids = []
        for task in results["recentTasks"]:
            task_id = task["taskId"]
            task_ids.append(task_id)

        try:
            results = ThreadPool(TASK_THREAD_COUNT).imap_unordered(
                self.get_task_status, task_ids)
        except Exception as e:
            print(e)
        for task_id, result, error in results:
            if error is None:
                task_state = None
                # filter out jobs that are gone
                if "code" in result:
                    if result["code"] == "ResourceNotFound":
                        continue
                try:
                    task_state = result["status"]["state"]
                except KeyError:
                    print("strange result: ")
                    pprint.pprint(result)
                    continue

                # record the last started time
                if "runs" in result["status"]:
                    for run in result["status"]["runs"]:
                        if "started" in run:
                            temp_date = pendulum.parse(run["started"])
                            if task_last_started_timestamp is None:
                                task_last_started_timestamp = temp_date
                            # if temp_date is more recent (larger), keep it as the latest start
                            if temp_date > task_last_started_timestamp:
                                task_last_started_timestamp = temp_date

                # TODO: gather exception stats
                if task_state == "running":
                    task_runnings += 1
                elif task_state == "exception":
                    task_exceptions += 1
                elif task_state == "failed":
                    task_failures += 1
                elif task_state == "completed":
                    retries_left = result["status"]["retriesLeft"]
                    if retries_left != 5:
                        runs = result["status"]["runs"]
                        for run in runs:
                            if run["workerId"] == device:
                                run_state = run["state"]
                                if run_state == "exception":
                                    task_exceptions += 1
                                elif run_state == "running":
                                    task_runnings += 1
                                elif run_state == "completed":
                                    task_successes += 1
                                elif run_state == "failed":
                                    task_failures += 1
                                else:
                                    raise Exception("Shouldn't be here!")
                    else:
                        task_successes += 1
                if self.verbosity:
                    print("%s.%s: %s: %s" %
                          (queue, device, task_id, task_state))
            else:
                # TODO: should return exception? only getting partial truth...
                pass
                # print("error fetching %r: %s" % (task_id, error))

        total = task_failures + task_successes
        results_obj = {}
        success_ratio_calculated = False
        if total > 0:
            success_ratio = task_successes / total
            # print("sr: %s/%s=%s" % (task_successes, total, success_ratio))
            results_obj["sr"] = success_ratio
            success_ratio_calculated = True
        else:
            results_obj["sr"] = float(0)
        results_obj["suc"] = task_successes
        results_obj["cmp"] = total
        results_obj["exc"] = task_exceptions
        results_obj["rng"] = task_runnings
        results_obj["ls"] = task_last_started_timestamp

        # note if no jobs in queue
        if queue in self.queue_counts:
            if self.queue_counts[queue] == 0:
                # if "notes" not in results_obj:
                #     results_obj["notes"] = []
                results_obj.setdefault("notes", []).append("No jobs in queue.")
                jobs_present = False
            else:
                jobs_present = True
        else:
            jobs_present = False
            logger.warning("Strange, no queue count data for %s" % queue)

        # ping alerts
        if self.args.ping:
            if self.args.ping_host:
                cmd = [
                    "ssh",
                    self.args.ping_host,
                    "ping -c 1 -i 0.3 -w 1 %s.%s" %
                    (device, self.args.ping_domain),
                ]
                res = subprocess.run(cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                if res.returncode != 0:
                    if "alerts" not in results_obj:
                        results_obj["alerts"] = []
                    results_obj["alerts"].append("Not pingable!")
                # TODO: write to notes that it is pingable?
            else:
                logger.warning("sorry, not supported yet")

        # alert if success ratio is low
        if success_ratio_calculated:
            if success_ratio < self.alert_percent:
                # if "alerts" not in results_obj:
                #     results_obj["alerts"] = []
                results_obj.setdefault("alerts", []).append(
                    "Low health (less than %s)!" % self.alert_percent)

        # alert if worker hasn't worked in 1 hour
        dt = pendulum.now()
        # TODO: take minutes as an arg
        comparison_dt = dt.subtract(minutes=60)
        if jobs_present and (task_last_started_timestamp is None
                             or task_last_started_timestamp < comparison_dt):
            results_obj.setdefault("alerts",
                                   []).append("No work started in last hour!")
        else:
            results_obj.setdefault("state", []).append("working")

        # alert if lots of exceptions
        if task_exceptions >= 3:
            results_obj.setdefault("alerts",
                                   []).append("High exceptions (3+)!")

        # alert if no work done
        if total == 0 and task_exceptions == 0 and task_runnings == 0:
            # if "alerts" not in results_obj:
            #     results_obj["alerts"] = []
            results_obj.setdefault("alerts", []).append("No work done!")

        # quarantine
        if device in self.quarantine_data[queue]:
            # if "alerts" not in results_obj:
            #     results_obj["alerts"] = []
            results_obj.setdefault("alerts", []).append("Quarantined.")

        return device, results_obj, None
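The loop above relies on `get_task_status` yielding `(task_id, result, error)` tuples through the thread pool; a minimal stand-alone sketch of that pattern, with a hypothetical fetch function in place of the real Taskcluster query:

from multiprocessing.pool import ThreadPool

def get_task_status(task_id):
    # hypothetical stand-in: never raises, reports errors in the tuple instead
    try:
        result = {"status": {"state": "completed"}}
        return task_id, result, None
    except Exception as e:
        return task_id, None, e

for task_id, result, error in ThreadPool(4).imap_unordered(get_task_status, ["t1", "t2"]):
    if error is None:
        print(task_id, result["status"]["state"])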
async def test_user_create_ok(backend, apiv1_backend_sock_factory, alice,
                              mallory, profile):
    now = pendulum.now()
    user_certificate = UserCertificateContent(
        author=alice.device_id,
        timestamp=now,
        user_id=mallory.user_id,
        human_handle=None,
        public_key=mallory.public_key,
        profile=profile,
    ).dump_and_sign(alice.signing_key)
    device_certificate = DeviceCertificateContent(
        author=alice.device_id,
        timestamp=now,
        device_id=mallory.device_id,
        device_label=None,
        verify_key=mallory.verify_key,
    ).dump_and_sign(alice.signing_key)

    with backend.event_bus.listen() as spy:
        async with apiv1_backend_sock_factory(backend, alice) as sock:
            rep = await user_create(sock,
                                    user_certificate=user_certificate,
                                    device_certificate=device_certificate)
        assert rep == {"status": "ok"}

        # No guarantees this event occurs before the command's return
        await spy.wait_with_timeout(
            BackendEvent.USER_CREATED,
            {
                "organization_id": alice.organization_id,
                "user_id": mallory.user_id,
                "first_device_id": mallory.device_id,
                "user_certificate": user_certificate,
                "first_device_certificate": device_certificate,
            },
        )

    # Make sure mallory can connect now
    async with apiv1_backend_sock_factory(backend, mallory) as sock:
        rep = await user_get(sock, user_id=mallory.user_id)
        assert rep["status"] == "ok"

    # Check the resulting data in the backend
    backend_user, backend_device = await backend.user.get_user_with_device(
        mallory.organization_id, mallory.device_id)
    assert backend_user == User(
        user_id=mallory.user_id,
        human_handle=None,
        profile=profile,
        user_certificate=user_certificate,
        redacted_user_certificate=user_certificate,
        user_certifier=alice.device_id,
        created_on=now,
    )
    assert backend_device == Device(
        device_id=mallory.device_id,
        device_label=None,
        device_certificate=device_certificate,
        redacted_device_certificate=device_certificate,
        device_certifier=alice.device_id,
        created_on=now,
    )
Example #47
def month_range(month_offset):
    month_range_list = [
        pendulum.now(tz).subtract(months=month_offset).start_of('month'),
        pendulum.now(tz).subtract(months=month_offset).end_of('month')
    ]
    return month_range_list
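Assuming `tz` is a timezone name defined at module scope (an assumption; it is not shown in the snippet), `month_range(1)` run during March covers all of February:

import pendulum

tz = "UTC"  # assumed module-level value
start, end = month_range(1)
# run on 2021-03-15: start == 2021-02-01T00:00:00+00:00,
#                    end   == 2021-02-28T23:59:59.999999+00:00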
    def run(
        self,
        state: State = None,
        upstream_states: Dict[Edge, State] = None,
        context: Dict[str, Any] = None,
        is_mapped_parent: bool = False,
    ) -> State:
        """
        The main endpoint for TaskRunners.  Calling this method will conditionally execute
        `self.task.run` with any provided inputs, assuming the upstream dependencies are in a
        state which allow this Task to run.  Additionally, this method will wait and perform
        Task retries which are scheduled for <= 10 minutes in the future.

        Args:
            - state (State, optional): initial `State` to begin task run from;
                defaults to `Pending()`
            - upstream_states (Dict[Edge, State]): a dictionary
                representing the states of any tasks upstream of this one. The keys of the
                dictionary should correspond to the edges leading to the task.
            - context (dict, optional): prefect Context to use for execution
            - is_mapped_parent (bool): a boolean indicating whether this task run is the run of
                a parent mapped task

        Returns:
            - `State` object representing the final post-run state of the Task
        """
        context = context or {}
        with prefect.context(context):
            end_state = super().run(
                state=state,
                upstream_states=upstream_states,
                context=context,
                is_mapped_parent=is_mapped_parent,
            )
            while (end_state.is_retrying() or end_state.is_queued()) and (
                end_state.start_time <= pendulum.now("utc").add(minutes=10)  # type: ignore
            ):
                assert isinstance(end_state, (Retrying, Queued))
                naptime = max(
                    (end_state.start_time - pendulum.now("utc")).total_seconds(), 0
                )
                for _ in range(int(naptime) // 30):
                    # send heartbeat every 30 seconds to let API know task run is still alive
                    self.client.update_task_run_heartbeat(
                        task_run_id=prefect.context.get("task_run_id")
                    )
                    naptime -= 30
                    time.sleep(30)

                if naptime > 0:
                    time.sleep(naptime)  # ensures we don't start too early

                self.client.update_task_run_heartbeat(
                    task_run_id=prefect.context.get("task_run_id")
                )

                end_state = super().run(
                    state=end_state,
                    upstream_states=upstream_states,
                    context=context,
                    is_mapped_parent=is_mapped_parent,
                )

            return end_state
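The retry wait above sleeps in 30-second slices so a heartbeat fires between naps; the same pattern in isolation, with a hypothetical send_heartbeat callable standing in for the client call:

import time

def sleep_with_heartbeat(naptime, send_heartbeat):
    for _ in range(int(naptime) // 30):
        send_heartbeat()     # keep the run marked alive every 30 seconds
        naptime -= 30
        time.sleep(30)
    if naptime > 0:
        time.sleep(naptime)  # sleep off the sub-30-second remainder
    send_heartbeat()         # final heartbeat before re-running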
async def test_versions_existing_file_remove_minimal_synced(
        alice_workspace, alice):
    version_lister = alice_workspace.get_version_lister()
    versions, version_list_is_complete = await version_lister.list(
        FsPath("/files/renamed"))
    assert version_list_is_complete is True
    assert len(versions) == 5

    # Moved /files/content to /files/renamed on day 5, moved it again later
    assert versions[0][1:] == (
        3,
        _day(6),
        _day(7),
        alice.device_id,
        _day(5),
        False,
        5,
        FsPath("/files/content"),
        FsPath("/files/renamed_again"),
    )
    # A new file with the same name was created on day 8; that creation entry is
    # dropped, since the list only keeps the version obtained by writing to the
    # file, which is the entry shown here.
    # Moved again on day 9 when we renamed /files to /moved
    assert versions[1][1:] == (
        2,
        _day(8),
        _day(9),
        alice.device_id,
        _day(8),
        False,
        6,
        None,
        FsPath("/moved/renamed"),
    )
    # Moved /moved back to /files on day 11; /files/renamed was deleted on day 12
    assert versions[2][1:] == (
        2,
        _day(11),
        _day(12),
        alice.device_id,
        _day(8),
        False,
        6,
        FsPath("/moved/renamed"),
        None,
    )
    # Created a file again, but didn't write to it
    assert versions[3][1:] == (
        1,
        _day(13),
        _day(14),
        alice.device_id,
        _day(13),
        False,
        0,
        None,
        None,
    )
    # Used "touch" method again, but on a created file. Wrote on it. Didn't delete since then
    assert versions[4][1:3] == (2, _day(14))
    assert now().add(hours=-1) < versions[4][3] < now()
    assert versions[4][4:] == (alice.device_id, _day(14), False, 5, None, None)
Example #50
    def get_file_name(self):
        now = pendulum.now()
        return 'participation_metrics_{}_{}_{}_generated_{}_{}.xlsx'.format(
            self.tenant, self.start_date.to_date_string(),
            self.end_date.to_date_string(), now.to_date_string(),
            now.int_timestamp)
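For example, with tenant "acme", a January 2021 reporting window, and a run at 2021-02-01 00:00 UTC (all values hypothetical), the method yields:

participation_metrics_acme_2021-01-01_2021-01-31_generated_2021-02-01_1612137600.xlsx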
async def test_versions_existing_directory(alice_workspace, alice,
                                           skip_minimal_sync):
    version_lister = alice_workspace.get_version_lister()
    versions, version_list_is_complete = await version_lister.list(
        FsPath("/files"), skip_minimal_sync=skip_minimal_sync)
    assert version_list_is_complete is True
    assert len(versions) == 8

    assert versions[0][1:] == (
        1,
        _day(4),
        _day(4),
        alice.device_id,
        _day(4),
        True,
        None,
        None,
        None,
    )
    assert versions[1][1:] == (
        2,
        _day(4),
        _day(6),
        alice.device_id,
        _day(4),
        True,
        None,
        None,
        None,
    )
    assert versions[2][1:] == (
        3,
        _day(6),
        _day(7),
        alice.device_id,
        _day(6),
        True,
        None,
        None,
        None,
    )
    assert versions[3][1:] == (
        4,
        _day(7),
        _day(8),
        alice.device_id,
        _day(7),
        True,
        None,
        None,
        None,
    )
    assert versions[4][1:] == (
        5,
        _day(8),
        _day(9),
        alice.device_id,
        _day(8),
        True,
        None,
        None,
        FsPath("/moved"),
    )
    assert versions[5][1:] == (
        6,
        _day(11),
        _day(12),
        alice.device_id,
        _day(10),
        True,
        None,
        FsPath("/moved"),
        None,
    )
    assert versions[6][1:] == (
        7,
        _day(12),
        _day(13),
        alice.device_id,
        _day(12),
        True,
        None,
        None,
        None,
    )
    assert versions[7][1:3] == (8, _day(13))
    assert now().add(hours=-1) < versions[7][3] < now()
    assert versions[7][4:] == (alice.device_id, _day(13), True, None, None,
                               None)
        context.update(running_with_backend=True)

        end_state = super().run(
            state=state,
            task_states=task_states,
            return_tasks=return_tasks,
            parameters=parameters,
            task_runner_state_handlers=task_runner_state_handlers,
            executor=executor,
            context=context,
            task_contexts=task_contexts,
        )
        # Wait only while the run is queued to start within the next 10 minutes;
        # if the start time is further out we return instead, so Lazarus can
        # pick the run up and reschedule it.
        while end_state.is_queued() and (
            end_state.start_time <= pendulum.now("utc").add(minutes=10)  # type: ignore
        ):
            assert isinstance(end_state, Queued)
            time_remaining = max(
                (end_state.start_time - pendulum.now("utc")).total_seconds(), 0
            )
            self.logger.debug(
                (
                    f"Flow run is in a Queued state. Sleeping for at most {time_remaining:.2f} "
                    f"seconds and attempting to run again."
                )
            )
            # Sleep until not in a queued state, then attempt to re-run
            while time_remaining > 0:
                delay = min(
                    prefect.config.cloud.check_cancellation_interval, time_remaining
                )
                # sleep in short slices so a cancellation can be noticed promptly
                time.sleep(delay)
                time_remaining -= delay
Example #53
    def __init__(self, api):
        super().__init__(api=api)
        self._state = {}
        self._end_date = pendulum.now()
Example #54
    def _set_state(self, account_id: str):
        start_date = self._state.get(account_id) or self._api._start_date
        self._state[account_id] = max(
            start_date,
            pendulum.now().subtract(days=self.buffer_days))
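The `max()` keeps each account's cursor from reaching further back than `buffer_days`; a worked instance with hypothetical values:

import pendulum

buffer_days = 28
saved_cursor = pendulum.datetime(2020, 1, 1)
# even a very old saved cursor is pulled forward to now - 28 days
new_state = max(saved_cursor, pendulum.now("UTC").subtract(days=buffer_days))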
Example #55
def main():
    parser = argparse.ArgumentParser(description='tiktok pipeline')
    parser.add_argument(
        "-s",
        "--start_date",
        help="start date",
        default=pendulum.now().subtract(days=5).to_date_string())
    parser.add_argument(
        "-e",
        "--end_date",
        help="end date",
        default=pendulum.now().subtract(days=1).to_date_string())

    args = parser.parse_args()
    started = datetime.now()
    sf_account = os.environ['SNOWFLAKE_ACCOUNT']
    sf_username = os.environ['SNOWFLAKE_USERNAME']
    sf_role = os.environ['SNOWFLAKE_ROLE']
    sf_password = os.environ['SNOWFLAKE_PASSWORD']
    sf_database = os.environ['SNOWFLAKE_DATABASE']
    sf_schema = os.environ['SNOWFLAKE_SCHEMA']
    sf_warehouse = os.environ['SNOWFLAKE_WAREHOUSE']

    conf = {
        'start_date': args.start_date,
        'end_date': args.end_date,
        'auth_token': os.environ['TIKTOK_API_KEY'],
        'advertiser_id': os.environ['TIKTOK_ADVERTISER_ID']
    }

    tap = 'tap-tiktok'
    target = 'atidiv-target-snowflake'
    tap_config = 'tiktok_config.json'
    target_config = 'snowflake_config.json'

    with open(tap_config, 'w') as out:
        json.dump(conf, out)

    snowflake_conf = {
        "snowflake_account": sf_account,
        "snowflake_username": sf_username,
        "snowflake_role": sf_role,
        "snowflake_password": sf_password,
        "snowflake_database": sf_database,
        "snowflake_schema": sf_schema,
        "snowflake_warehouse": sf_warehouse,
        "add_metadata_columns": False
    }

    with open(target_config, 'w') as out:
        json.dump(snowflake_conf, out)

    r = os.system(
        f"~/.virtualenvs/{tap}/bin/{tap} --config {tap_config} | ~/.virtualenvs/{target}/bin/{target} -c {target_config}"
    )

    if r != 0:
        raise Exception('error occurred!')

    ended = datetime.now()
    pretty(f'execution time : {ended - started}')
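os.system only surfaces the shell's exit status; an equivalent with subprocess.run (same hypothetical virtualenv layout) keeps the pipe but makes the failure check explicit:

import subprocess

cmd = (f"~/.virtualenvs/{tap}/bin/{tap} --config {tap_config}"
       f" | ~/.virtualenvs/{target}/bin/{target} -c {target_config}")
r = subprocess.run(cmd, shell=True)
if r.returncode != 0:
    raise Exception('error occurred!')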
Example #56
def now():
    return hi.say() + "it is " + pendulum.now('Europe/Paris').format(
        'dddd Do [of] MMMM YYYY HH:mm:ss A')
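With pendulum's token set (`dddd` weekday name, `Do` ordinal day, `[of]` a literal, `A` meridiem), the format call produces strings like this:

import pendulum

dt = pendulum.datetime(2021, 3, 1, 14, 30, tz='Europe/Paris')
print(dt.format('dddd Do [of] MMMM YYYY HH:mm:ss A'))
# Monday 1st of March 2021 14:30:00 PM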
Example #57
def test_hmins2epoch(hours, mins):
    now = pendulum.now(tz='Europe/Berlin').replace(hour=hours, minute=mins)
    assert hmins2epoch(hours, mins) == now.format('X', formatter='alternative')
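The test never shows `hmins2epoch` itself; one plausible implementation consistent with the assertion (hypothetical, and tied to pendulum 1.x, where formatter='alternative' selects the token set in which 'X' is the Unix timestamp):

import pendulum

def hmins2epoch(hours, mins):
    # hypothetical: today's date in Europe/Berlin at the given wall-clock time
    dt = pendulum.now(tz='Europe/Berlin').replace(hour=hours, minute=mins)
    return dt.format('X', formatter='alternative')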
Example #58
    def test_datetime_parameter_returns_datetimes_if_passed_string(self):
        now = pendulum.now()
        state = self.dt_flow.run(dt=str(now))
        assert state.result[self.dt].result == now
        assert state.result[self.x].result == now
Example #59
class Achievement(EmbeddedDocument):
    title = StringField()
    description = StringField()
    key = StringField()
    date = DateTimeField(default=pendulum.now)
    validUntil = DateTimeField(default=lambda: pendulum.now().add(days=7))
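Note the two default styles above: `pendulum.now` is passed uncalled so it is evaluated per document, and `validUntil` needs the lambda for the same reason (calling `pendulum.now().add(days=7)` directly would freeze a single value at import time). A sketch of use:

a = Achievement(title="First login", key="first-login")
# a.date is set near instantiation time; a.validUntil about 7 days later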
Example #60
    async def on_command_error(self, ctx: context.Context, error) -> None:

        error = getattr(error, 'original', error)

        if isinstance(error, commands.CommandNotFound):
            return

        elif isinstance(error, commands.CommandOnCooldown):
            cooldown_types = {
                commands.BucketType.default: 'for the whole bot.',
                commands.BucketType.user: 'for you.',
                commands.BucketType.member: 'for you.',
                commands.BucketType.role: 'for your role.',
                commands.BucketType.guild: 'for this server.',
                commands.BucketType.channel: 'for this channel.',
                commands.BucketType.category: 'for this channel category.'
            }
            await ctx.send(
                f'The command `{ctx.command}` is on cooldown {cooldown_types.get(error.cooldown.type, "for you.")} You can retry in '
                f'`{self.bot.utils.format_seconds(seconds=error.retry_after, friendly=True)}`'
            )
            return

        elif isinstance(error, commands.MaxConcurrencyReached):
            cooldowns = {
                commands.BucketType.default: '.',
                commands.BucketType.user: ' per user.',
                commands.BucketType.member: ' per member.',
                commands.BucketType.role: ' per role.',
                commands.BucketType.guild: ' per server.',
                commands.BucketType.channel: ' per channel.',
                commands.BucketType.category: ' per channel category.',
            }
            await ctx.send(
                f'The command `{ctx.command}` is being run at its maximum of {error.number} time(s){cooldowns.get(error.per, ".")} Retry a bit later.'
            )
            return

        elif isinstance(error, commands.BotMissingPermissions):
            permissions = '\n'.join(
                [f'> {permission}' for permission in error.missing_perms])
            message = f'I am missing the following permissions required to run the command `{ctx.command}`.\n{permissions}'
            try:
                await ctx.send(message)
            except discord.Forbidden:
                try:
                    await ctx.author.send(message)
                except discord.Forbidden:
                    return
            return

        elif isinstance(error, commands.MissingPermissions):
            permissions = '\n'.join(
                [f'> {permission}' for permission in error.missing_perms])
            await ctx.send(
                f'You are missing the following permissions required to run the command `{ctx.command}`.\n{permissions}'
            )
            return

        elif isinstance(error, commands.MissingRequiredArgument):
            await ctx.send(
                f'You missed the `{error.param.name}` argument. Use `{self.bot.config.prefix}help {ctx.command}` for more information on what arguments to use.'
            )
            return

        elif isinstance(error, commands.BadUnionArgument):
            await ctx.send(
                f'I was unable to convert the `{error.param}` argument. Use `{self.bot.config.prefix}help {ctx.command}` for more information on what arguments '
                f'to use.')
            return

        elif isinstance(error, commands.MissingRole):
            await ctx.send(
                f'The role `{error.missing_role}` is required to run this command.'
            )
            return

        elif isinstance(error, commands.BotMissingRole):
            await ctx.send(
                f'The bot requires the role `{error.missing_role}` to run this command.'
            )
            return

        elif isinstance(error, commands.MissingAnyRole):
            await ctx.send(
                f'The roles {", ".join([f"`{role}`" for role in error.missing_roles])} are required to run this command.'
            )
            return

        elif isinstance(error, commands.BotMissingAnyRole):
            await ctx.send(
                f'The bot requires the roles {", ".join([f"`{role}`" for role in error.missing_roles])} to run this command.'
            )
            return

        elif isinstance(error, commands.BadArgument):

            if isinstance(error, commands.MessageNotFound):
                await ctx.send(
                    f'A message for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.MemberNotFound):
                await ctx.send(
                    f'A member for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.UserNotFound):
                await ctx.send(
                    f'A user for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.ChannelNotFound):
                await ctx.send(
                    f'A channel for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.RoleNotFound):
                await ctx.send(
                    f'A role for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.EmojiNotFound):
                await ctx.send(
                    f'An emoji for the argument `{error.argument}` was not found.'
                )
            elif isinstance(error, commands.ChannelNotReadable):
                await ctx.send(
                    f'I do not have permission to read the channel `{error.argument}`.'
                )
            elif isinstance(error, commands.PartialEmojiConversionFailure):
                await ctx.send(
                    f'The argument `{error.argument}` did not match the partial emoji format.'
                )
            elif isinstance(error, commands.BadInviteArgument):
                await ctx.send(
                    'The invite that matched that argument is not valid or has expired.'
                )
            elif isinstance(error, commands.BadBoolArgument):
                await ctx.send(
                    f'The argument `{error.argument}` was not a valid True/False value.'
                )
            elif isinstance(error, commands.BadColourArgument):
                await ctx.send(
                    f'The argument `{error.argument}` was not a valid colour.')
            elif isinstance(error, BadLiteralArgument):
                await ctx.send(
                    f'The argument `{error.param.name}` must be one of {", ".join([f"`{arg}`" for arg in error.valid_arguments])}.'
                )
            return

        else:

            error_messages = {
                exceptions.ArgumentError:
                f'{error}',
                exceptions.ImageError:
                f'{error}',
                exceptions.VoiceError:
                f'{error}',
                NodeNotFound:
                'There are no lavalink nodes available right now.',
                commands.TooManyArguments:
                f'You used too many arguments. Use `{self.bot.config.prefix}help {ctx.command}` for more information on what arguments to use.',
                commands.UnexpectedQuoteError:
                'There was an unexpected quote character in the arguments you passed.',
                commands.InvalidEndOfQuotedStringError:
                'There was an unexpected space after a quote character in the arguments you passed.',
                commands.ExpectedClosingQuoteError:
                'There is a missing quote character in the arguments you passed.',
                commands.BadArgument:
                f'I was unable to convert an argument that you used. Use `{self.bot.config.prefix}help {ctx.command}` for more information on what '
                f'arguments to use.',
                commands.CheckFailure:
                f'{error}',
                commands.PrivateMessageOnly:
                f'The command `{ctx.command}` can only be used in private messages.',
                commands.NoPrivateMessage:
                f'The command `{ctx.command}` can not be used in private messages.',
                commands.NotOwner:
                f'The command `{ctx.command}` is owner only.',
                commands.NSFWChannelRequired:
                f'The command `{ctx.command}` can only be run in a NSFW channel.',
                commands.DisabledCommand:
                f'The command `{ctx.command}` has been disabled.',
            }

            error_message = error_messages.get(type(error), None)
            if error_message is not None:
                await ctx.send(error_message)
                return

        await ctx.send(
            f'Something went wrong while executing that command. Please use `{self.bot.config.prefix}support` for more help or information.'
        )

        formatter = prettify_exceptions.DefaultFormatter()

        formatter.theme['_ansi_enabled'] = True
        print(
            f'\n{"".join(formatter.format_exception(type(error), error, error.__traceback__)).strip()}\n'
        )

        time = self.bot.utils.format_datetime(datetime=pendulum.now(tz='UTC'))
        guild = f'`Guild:` {ctx.guild} `{ctx.guild.id}`\n' if ctx.guild else ''
        info = f'Error in command `{ctx.command}`\n\n{guild}`Channel:` {ctx.channel} `{ctx.channel.id}`\n`Author:` {ctx.author} `{ctx.author.id}`\n`Time:` {time}'

        embed = discord.Embed(colour=ctx.colour,
                              description=f'{ctx.message.content}')
        embed.add_field(name='Info:', value=info)

        await self.bot.errors_webhook.send(
            embed=embed,
            username=f'{ctx.author}',
            avatar_url=str(
                ctx.author.avatar_url_as(format='gif' if ctx.author.
                                         is_avatar_animated() else 'png')))

        formatter.theme['_ansi_enabled'] = False
        traceback = "".join(
            formatter.format_exception(type(error), error,
                                       error.__traceback__)).strip()

        if len(traceback) > 2000:
            async with self.bot.session.post('https://mystb.in/documents',
                                             data=traceback) as response:
                response = await response.json()
            traceback = f'https://mystb.in/{response["key"]}.python'
        else:
            traceback = f'```\n{traceback}\n```'

        await self.bot.errors_webhook.send(
            content=f'{traceback}',
            username=f'{ctx.author}',
            avatar_url=str(
                ctx.author.avatar_url_as(format='gif' if ctx.author.
                                         is_avatar_animated() else 'png')))