Code example #1
File: app.py Project: ntpz/markr
 def stats(cls):
     q = (cls
          .select(Origin.code, Origin.title, Origin.tick_size,
                  fn.MIN(cls.ts).alias('min_ts'),
                  fn.MAX(cls.ts).alias('max_ts'),
                  fn.COUNT(cls.id).alias('num_marks'),
                  fn.MIN(cls.dts).alias('min_dts'),
                  fn.MAX(cls.dts).alias('max_dts'),
                  fn.AVG(cls.dts).alias('avg_dts'),
                  fn.STDDEV_POP(cls.dts).alias('std_dts'))
          .join(Origin)
          .group_by(Origin.code))
     return q.namedtuples()
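A hypothetical usage sketch (the Mark model name is assumed; each row comes back as a namedtuple keyed by the aliases above):

# Hypothetical usage; `Mark` stands in for whatever model defines stats().
for row in Mark.stats():
    print(row.code, row.num_marks, row.avg_dts, row.std_dts)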
Code example #2
def get_smallest_match_id() -> Optional[int]:
    subq = MatchId.select(fn.MIN(MatchId.match_id))
    try:
        match_id = MatchId.select().where(MatchId.match_id == subq).get().match_id
    except MatchId.DoesNotExist:
        match_id = None

    return match_id
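A shorter equivalent (a sketch against the same MatchId model): MIN() over an empty table yields SQL NULL, which scalar() returns as None, so the exception handling can be dropped.

def get_smallest_match_id_scalar() -> Optional[int]:
    # MIN() over zero rows is NULL, so scalar() returns None for an empty table.
    return MatchId.select(fn.MIN(MatchId.match_id)).scalar()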
Code example #3
File: tle.py Project: kerel-fs/space-command
def print_stats():
    db = TleDb()
    first = db.model.select(fn.MIN(TleModel.insert_date)).scalar()
    last = db.model.select(fn.MAX(TleModel.insert_date)).scalar()

    print("Objects       {}".format(db.model.select().group_by(
        TleModel.norad_id).count()))
    print("TLE           {}".format(db.model.select().count()))
    print("First fetch   {}".format(first))
    print("Last fetch    {}".format(last))
Code example #4
def multi_reduce() -> 'ModelSelect':
    """多个聚集函数.

    SELECT COUNT(*) AS count,
           MIN(prod_price) AS min,
           MAX(prod_price) AS max,
           AVG(prod_price) AS avg,
           SUM(prod_price) AS sum
    FROM product;
    """
    return (Product.select(fn.COUNT(SQL('*')).alias('count'),  # COUNT(*); SQL is peewee's raw-SQL helper
                           fn.MIN(Product.prod_price).alias('min'),
                           fn.MAX(Product.prod_price).alias('max'),
                           fn.AVG(Product.prod_price).alias('avg'),
                           fn.SUM(Product.prod_price).alias('sum')))
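A possible usage sketch (assuming the same Product model): dicts().get() returns the single aggregate row as a plain dict keyed by the aliases.

row = multi_reduce().dicts().get()
print(row['count'], row['min'], row['max'], row['avg'], row['sum'])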
Code example #5
def saveProcessedFile(file, path):
    tmp = Mad_ToBeProcessedFile(
        path=path,
        status=False
    )
    tmp.save()
    count = Mad_ProcessedFile.select().count()  # pylint: disable=E1120
    # Keep the table below 600 rows by deleting the oldest (lowest-id) row;
    # re-querying MIN(id) each pass stays correct even when ids have gaps.
    while count >= 600:
        minimum = Mad_ProcessedFile.select(fn.MIN(Mad_ProcessedFile.id)).scalar()  # pylint: disable=E1120
        Mad_ProcessedFile.delete().where(Mad_ProcessedFile.id == minimum).execute()
        count -= 1
    tmp = Mad_ProcessedFile(
        name=file
    )
    tmp.save()
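The row-by-row trim can usually be collapsed into a single DELETE with a subquery (a sketch; note that some backends, MySQL in particular, reject LIMIT inside an IN subquery):

# Sketch: keep the 599 newest rows and delete the rest in one statement.
keep = (Mad_ProcessedFile
        .select(Mad_ProcessedFile.id)
        .order_by(Mad_ProcessedFile.id.desc())
        .limit(599))
Mad_ProcessedFile.delete().where(Mad_ProcessedFile.id.not_in(keep)).execute()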
Code example #6
def per_house_overlap(hour_per_step):
    time_range = (Task.select(
        fn.MAX(Task.created_at).alias('max_created_at'),
        fn.MIN(Task.created_at).alias('min_created_at'),
    ).get())

    time_cursor = time_range.min_created_at.replace(minute=0,
                                                    second=0,
                                                    microsecond=0)
    one_step = timedelta(hours=hour_per_step)

    start_time = time_cursor
    end_time = time_range.max_created_at
    time_duration = end_time - start_time
    time_cursor += one_step
    per_test_coverage = {}
    overall_coverage = []

    print(f'=== Every {hour_per_step} Hours Coverage ===')
    print('progress:  0%', end='\r')

    while time_cursor < end_time:
        progress = (time_cursor - start_time) / time_duration
        print(f'progress:  {progress*100:5.1f}%', end='\r')

        stats = compare_step_hour_data(time_cursor, hour_per_step)

        for minute_ago in stats['tests'].keys():
            if minute_ago not in per_test_coverage:
                per_test_coverage[minute_ago] = []
            per_test_coverage[minute_ago].append(stats['tests'][minute_ago])

        overall_coverage += stats['overall']

        time_cursor += one_step

    for minute_ago in sorted(per_test_coverage.keys()):
        mean = statistics.mean(per_test_coverage[minute_ago])
        stdev = statistics.stdev(per_test_coverage[minute_ago])
        print(
            f'Test[{minute_ago:02}] coverage avg: {mean:5.1f}%, std: {stdev:5.1f}%'
        )

    mean = statistics.mean(overall_coverage)
    stdev = statistics.stdev(overall_coverage)
    print(f'Overall  coverage avg: {mean:5.1f}%, std: {stdev:5.1f}%')
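One caveat for the aggregation above: statistics.stdev raises StatisticsError on fewer than two samples, so sparse buckets would crash the report. A small defensive wrapper (a sketch, not in the original):

def safe_stdev(values, default=0.0):
    # stdev needs at least two data points; fall back otherwise.
    return statistics.stdev(values) if len(values) >= 2 else default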
Code example #7
File: etl.py Project: jrocketfingers/is2
                                 operational.Customer.name.alias(Aliases.customer_name),
                                 operational.Customer.age,
                                 operational.Customer.gender,
                                 operational.Seller.PIB,
                                 operational.Seller.name.alias(Aliases.seller_name),
                                 operational.Seller.street.alias(Aliases.seller_street),
                                 operational.Seller.number.alias(Aliases.seller_number),
                                 operational.District.name.alias(Aliases.district),
                                 operational.City.name.alias(Aliases.city),
                                 operational.Article.name.alias(Aliases.article_name),
                                 operational.Type.name.alias(Aliases.type),
                                 operational.Size.name.alias(Aliases.size),
                                 operational.Color.name.alias(Aliases.color),
                                 fn.DATE(operational.Order.created_at).alias(Aliases.date),
                                 operational.Order.created_at.hour.alias(Aliases.hour),
                                 fn.MIN(operational.Offer.price).alias(Aliases.lowest_price),
                                 fn.MAX(operational.Offer.price).alias(Aliases.highest_price))\
 .join(operational.Customer)\
 .join(operational.OfferOrder, on=(operational.OfferOrder.order_id == operational.Order.id))\
 .join(operational.Offer)\
 .join(operational.Article)\
 .join(operational.Seller, on=(operational.Offer.seller_id == operational.Seller.PIB)) \
 .join(operational.District, on=(operational.Seller.district_id == operational.District.id))\
 .join(operational.City, on=(operational.District.city_id == operational.City.id))\
 .join(operational.Type, on=(operational.Type.id == operational.Article.type_id))\
 .join(operational.Color, on=(operational.Color.id == operational.Article.color_id))\
 .join(operational.Size, on=(operational.Size.id == operational.Article.size_id))\
 .group_by(operational.Customer.JMBG,
           operational.Customer.name,
           operational.Customer.gender,
           operational.Customer.age,
Code example #8
 def search_min(cls):
     # Note: the chained .where() calls AND together, so a row matches only if
     # it is both the newest and the cheapest record in the table.
     data = (cls
             .select(cls.name, cls.website, cls.productid, cls.price, cls.time)
             .where(cls.time == cls.select(fn.MAX(cls.time)))
             .where(cls.price == cls.select(fn.MIN(cls.price)))
             .limit(1)
             .dicts())
     return list(data)
Code example #9
File: subreddit.py Project: pl77/redditPostArchiver
def process_comments(appcfg):
    # Get newest comments with two week overlap
    print('   PROCESSING NEWEST PUSHSHIFT.IO COMMENTS FOR', appcfg.subreddit)

    try:
        newest_utc = int(
            Comment.select(fn.MAX(Comment.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        newest_utc = None
    if newest_utc is not None:
        oldestdate = newest_utc  # - 1209600  # two weeks overlap, in seconds
    else:
        oldestdate = appcfg.oldestdate

    try:
        comment_id_set = get_push_comments(appcfg, appcfg.newestdate,
                                           oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        comment_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return comment_id_set

    # Get oldest comments in case progress was interrupted, with two week overlap
    try:
        oldest_utc = int(
            Comment.select(fn.MIN(Comment.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        oldest_utc = None
    if oldest_utc is not None:
        newestdate = oldest_utc  # + 1209600  # two weeks overlap, in seconds
    else:
        newestdate = appcfg.newestdate
    print('   PROCESSING OLDEST PUSHSHIFT.IO COMMENTS FOR', appcfg.subreddit)

    try:
        old_comment_id_set = get_push_comments(appcfg, newestdate,
                                               appcfg.oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        old_comment_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return old_comment_id_set
    comment_id_set |= old_comment_id_set
    filedate = arrow.now().timestamp  # a property in arrow < 1.0; newer arrow uses .int_timestamp
    basedir = "/rpa" if os.environ.get('DOCKER', '0') == '1' else '.'
    coutput_file_path = "{basedir}/{subreddit}_comments_{timestamp}.txt".format(
        basedir=basedir, subreddit=appcfg.subreddit, timestamp=filedate)

    # with open(coutput_file_path, 'w', encoding='UTF-8') as comment_file:
    #     comment_file.writelines(comment_id_set)
    print("     Total comments submitted to", appcfg.subreddit, "in set:",
          len(comment_id_set))
    deleted = Author.get_or_none(name='[deleted]')
    if deleted is not None:
        cupdatet = Comment.update(deleted=True).where(
            (Comment.author == deleted.id)
            # `|`, not `or`: Python's `or` keeps only the left-hand expression.
            & (Comment.deleted.is_null() | (Comment.deleted == 0))).execute()
        print(
            '     Updated deleted field in comments.  Set deleted = True for',
            cupdatet, 'records.')
        cupdatef = Comment.update(
            deleted=False).where((Comment.author != deleted.id)
                                 & (Comment.deleted.is_null())).execute()
        print(
            '     Updated deleted field in comments.  Set deleted = False for',
            cupdatef, 'records.')
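Aside: peewee conditions must be combined with the overloaded &/| operators, with each side parenthesized because Python's bitwise operators bind tighter than comparisons. Python's and/or evaluate truthiness and silently return one operand, which is why the query above uses | rather than or. A minimal illustration against the same Comment model:

# WRONG: `or` returns the left-hand expression object; the right side is lost.
# cond = Comment.deleted.is_null() or (Comment.deleted == 0)
# RIGHT: `|` builds a single SQL OR expression.
cond = Comment.deleted.is_null() | (Comment.deleted == 0)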
Code example #10
    def build_query(self, version_id, query_region=None):
        c = Catalog.alias()
        c2t = CatalogToBHM_CSC.alias()
        t = BHM_CSC.alias()
        self.alias_t = t
        # c2s16 = CatalogToSDSS_DR16_SpecObj.alias()
        # s16 = SDSS_DR16_SpecObj.alias()
        # s2020 = BHM_eFEDS_Veto.alias()
        # sV = SDSSV_BOSS_SPALL.alias()
        # ph = SDSSV_Plateholes.alias()
        # phm = SDSSV_Plateholes_Meta.alias()

        # set the Carton priority+values here - read from yaml
        value = peewee.Value(self.parameters.get('value', 1.0)).cast('float')
        instrument = peewee.Value(self.instrument)
        cadence = peewee.Value(self.this_cadence)
        # opt_prov = peewee.Value('ps1_psfmag')

        if (self.instrument == 'BOSS'):

            # #########################################################################
            # prepare the spectroscopy catalogues
            match_radius_spectro = self.parameters['spec_join_radius'] / 3600.0
            spec_sn_thresh = self.parameters['spec_sn_thresh']
            spec_z_err_thresh = self.parameters['spec_z_err_thresh']
            dpriority_has_spec = self.parameters['dpriority_has_spec']

            # SDSS DR16
            c2s16 = CatalogToSDSS_DR16_SpecObj.alias()
            ss16 = SDSS_DR16_SpecObj.alias()
            s16 = (
                ss16.select(
                    ss16.specobjid.alias('specobjid'),
                )
                .where(
                    ss16.snmedian >= spec_sn_thresh,
                    ss16.zwarning == 0,
                    ss16.zerr <= spec_z_err_thresh,
                    ss16.zerr > 0.0,
                    ss16.scienceprimary > 0,
                )
                .alias('s16')
            )

            # SDSS-IV/eFEDS March2020
            c2s2020 = CatalogToBHM_eFEDS_Veto.alias()
            ss2020 = BHM_eFEDS_Veto.alias()
            s2020 = (
                ss2020.select(
                    ss2020.pk.alias('pk'),
                )
                .where(
                    ss2020.sn_median_all >= spec_sn_thresh,
                    ss2020.zwarning == 0,
                    ss2020.z_err <= spec_z_err_thresh,
                    ss2020.z_err > 0.0,
                )
                .alias('s2020')
            )

            # SDSS-V spAll
            ssV = SDSSV_BOSS_SPALL.alias()
            sV = (
                ssV.select(
                    ssV.specobjid.alias('specobjid'),
                    ssV.plug_ra.alias('plug_ra'),
                    ssV.plug_dec.alias('plug_dec'),
                )
                .where(
                    ssV.sn_median_all >= spec_sn_thresh,
                    ssV.zwarning == 0,
                    ssV.z_err <= spec_z_err_thresh,
                    ssV.z_err > 0.0,
                    ssV.specprimary > 0,
                )
                .alias('sV')
            )

            # SDSS-V plateholes - only consider plateholes that
            # were drilled+shipped but that were not yet observed
            ssph = SDSSV_Plateholes.alias()
            ssphm = SDSSV_Plateholes_Meta.alias()
            ssconf = SDSSV_BOSS_Conflist.alias()
            sph = (
                ssph.select(
                    ssph.pkey.alias('pkey'),
                    ssph.target_ra.alias('target_ra'),
                    ssph.target_dec.alias('target_dec'),
                )
                .join(
                    ssphm,
                    on=(ssph.yanny_uid == ssphm.yanny_uid)
                )
                .join(
                    ssconf, JOIN.LEFT_OUTER,
                    on=(ssphm.plateid == ssconf.plate)
                )
                .where(
                    (ssph.holetype == 'BOSS_SHARED'),
                    (ssph.sourcetype == 'SCI') | (ssph.sourcetype == 'STA'),
                    ssphm.isvalid > 0,
                    ssconf.plate.is_null(),
                )
                .alias('sph')
            )

        # adjust priority if target already has an SDSS spectrum
            priority_1 = peewee.Case(
                None,
                (
                    (s16.c.specobjid.is_null(False), 1),  # any of these can be satisfied
                    (s2020.c.pk.is_null(False), 1),
                    (sV.c.specobjid.is_null(False), 1),
                    (sph.c.pkey.is_null(False), 1),
                ),
                0)
            #
            # Compute net priority
            priority = (
                peewee.Value(self.parameters['priority_floor']) +
                priority_1 * dpriority_has_spec
            )
        else:
            priority = peewee.Value(self.parameters['priority_floor'])

        # compute transformed SDSS mags for pointlike and extended sources separately
        # transform the csc (panstarrs1-dr1) griz into sdss psfmag griz

        # extract coeffs from fit logs via:
        # awk 'BEGIN {print("coeffs = {")} /POLYFIT/{ if($3~/sdss_psfmag/){pe="p"} else if ($3~/sdss_fiber2mag/){pe="e"} else{pe="error"}; printf("\"%s%d_%s\": %s,\n", substr($3,length($3)), $8, pe, $10)} END {print("}")}'  bhm_csc_boss/ts_mag_to_sdss_psfmag_?_results.log  # noqa

        coeffs = {
            "g2_p": 0.087878,
            "g1_p": 0.063329,
            "g0_p": 0.021488,
            "i2_p": -0.011220,
            "i1_p": 0.020782,
            "i0_p": 0.000154,
            "r2_p": -0.093371,
            "r1_p": 0.136032,
            "r0_p": -0.011477,
            "z2_p": -0.180526,
            "z1_p": 0.007284,
            "z0_p": -0.037933,
        }
        # Note that the corrections for r and i are very small,
        # however g+z both have non-negligible colour terms

        g0 = peewee.Case(None, ((t.mag_g <= 0.0, None),), t.mag_g)
        r0 = peewee.Case(None, ((t.mag_r <= 0.0, None),), t.mag_r)
        i0 = peewee.Case(None, ((t.mag_i <= 0.0, None),), t.mag_i)
        z0 = peewee.Case(None, ((t.mag_z <= 0.0, None),), t.mag_z)
        g_r = g0 - r0
        r_i = r0 - i0
        i_z = i0 - z0

        # use a single set of transforms because we have no info in the csc
        # parent table to differentiate between pointlike and extended sources
        g = (g0 + coeffs['g0_p'] + coeffs['g1_p'] * g_r + coeffs['g2_p'] * g_r * g_r)
        r = (r0 + coeffs['r0_p'] + coeffs['r1_p'] * g_r + coeffs['r2_p'] * g_r * g_r)
        i = (i0 + coeffs['i0_p'] + coeffs['i1_p'] * r_i + coeffs['i2_p'] * r_i * r_i)
        z = (z0 + coeffs['z0_p'] + coeffs['z1_p'] * i_z + coeffs['z2_p'] * i_z * i_z)

        # validity checks (only griz) - set limits semi-manually
        g_r_min = -0.3
        g_r_max = 1.7
        r_i_min = -0.5
        r_i_max = 2.5
        i_z_min = -0.3
        i_z_max = 1.25
        valid = (g0.between(0.1, 29.9) &
                 r0.between(0.1, 29.9) &
                 i0.between(0.1, 29.9) &
                 z0.between(0.1, 29.9) &
                 g_r.between(g_r_min, g_r_max) &
                 r_i.between(r_i_min, r_i_max) &
                 i_z.between(i_z_min, i_z_max))

        opt_prov = peewee.Case(None, ((valid, 'sdss_psfmag_from_csc'),), 'undefined')
        magnitude_g = peewee.Case(None, ((valid, g),), 'NaN')
        magnitude_r = peewee.Case(None, ((valid, r),), 'NaN')
        magnitude_i = peewee.Case(None, ((valid, i),), 'NaN')
        magnitude_z = peewee.Case(None, ((valid, z),), 'NaN')
        magnitude_h = peewee.Case(None, ((t.mag_h <= 0.0, None),), t.mag_h).cast('float')

        # # Process the bhm_csc.[g,r,i,z,h] magnitudes to deal with zeros
        # magnitude_g = peewee.Case(None, ((t.mag_g <= 0.0, None),), t.mag_g).cast('float')
        # magnitude_r = peewee.Case(None, ((t.mag_r <= 0.0, None),), t.mag_r).cast('float')
        # magnitude_i = peewee.Case(None, ((t.mag_i <= 0.0, None),), t.mag_i).cast('float')
        # magnitude_z = peewee.Case(None, ((t.mag_z <= 0.0, None),), t.mag_z).cast('float')
        # magnitude_h = peewee.Case(None, ((t.mag_h <= 0.0, None),), t.mag_h).cast('float')

        # Create a subquery that will calculate the minimum catalog_to_bhm_csc.distance for each
        # csc candidate target
        subq = (
            c2t
            .select(
                c2t.target_id,
                fn.MIN(c2t.distance).alias('min_distance'))
            .where(
                c2t.version_id == version_id,
                c2t.best >> True
            )
            .group_by(c2t.target_id)
            .alias('min_dist_subq')
        )

        query = (
            c.select(
                c.catalogid,
                t.cxo_name,   # extra
                t.pk.alias('csc_pk'),   # extra
                c.ra,  # extra
                c.dec,  # extra
                priority.alias('priority'),
                value.alias('value'),
                cadence.alias('cadence'),
                instrument.alias('instrument'),
                opt_prov.alias('optical_prov'),
                magnitude_g.alias('g'),
                magnitude_r.alias('r'),
                magnitude_i.alias('i'),
                magnitude_z.alias('z'),
                magnitude_h.alias('h'),
                t.mag_g.alias('csc_mag_g'),   # extra
                t.mag_r.alias('csc_mag_r'),   # extra
                t.mag_i.alias('csc_mag_i'),   # extra
                t.mag_z.alias('csc_mag_z'),   # extra
                t.oir_ra.alias('csc_ra'),   # extra
                t.oir_dec.alias('csc_dec'),   # extra
            )
            .join(c2t)
            .join(t)
            .join(
                subq,
                on=(
                    (c2t.target_id == subq.c.target_id) &
                    (
                        (c2t.distance == subq.c.min_distance) |
                        (c2t.distance.is_null() & subq.c.min_distance.is_null())
                    )
                ),
            )
            .where(
                c.version_id == version_id,
                c2t.version_id == version_id,
                c2t.best >> True
            )
            # .distinct([c2t.target_id])  # avoid duplicates - trust the CSC parent sample,
            # .distinct([c.catalogid])  # avoid duplicates - trust the catalogid,
            # avoid duplicates - trust uniqueness in both CSC name and catalogid
            .distinct([c.catalogid])
            # .distinct([t.cxo_name])
            .where(t.spectrograph == self.instrument)
        )

        if (self.instrument == 'BOSS'):
            # Append the spectro query
            query = (
                query
                .switch(c)
                .join(c2s16, JOIN.LEFT_OUTER)
                .join(
                    s16, JOIN.LEFT_OUTER,
                    on=(
                        (c2s16.target_id == s16.c.specobjid) &
                        (c2s16.version_id == version_id)
                    )
                )
                .switch(c)
                .join(c2s2020, JOIN.LEFT_OUTER)
                .join(
                    s2020, JOIN.LEFT_OUTER,
                    on=(
                        (c2s2020.target_id == s2020.c.pk) &
                        (c2s2020.version_id == version_id)
                    )
                )
                .join(
                    sV, JOIN.LEFT_OUTER,
                    on=(
                        fn.q3c_join(sV.c.plug_ra, sV.c.plug_dec,
                                    c.ra, c.dec,
                                    match_radius_spectro)
                    )
                )
                .join(
                    sph, JOIN.LEFT_OUTER,
                    on=(
                        fn.q3c_join(sph.c.target_ra, sph.c.target_dec,
                                    c.ra, c.dec,
                                    match_radius_spectro)
                    )
                )
            )

        if query_region:
            query = query.where(peewee.fn.q3c_radial_query(c.ra, c.dec,
                                                           query_region[0],
                                                           query_region[1],
                                                           query_region[2]))

        return query
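The min_dist_subq join above is the generic "minimum per group" pattern: aggregate in a subquery, then join back on the grouping key and the aggregated value. A minimal self-contained sketch of the same technique (the Reading model is invented for illustration):

from peewee import SqliteDatabase, Model, IntegerField, FloatField, fn

db = SqliteDatabase(':memory:')

class Reading(Model):  # hypothetical model, for illustration only
    sensor_id = IntegerField()
    distance = FloatField()

    class Meta:
        database = db

# Subquery: smallest distance per sensor.
best = (Reading
        .select(Reading.sensor_id,
                fn.MIN(Reading.distance).alias('min_distance'))
        .group_by(Reading.sensor_id)
        .alias('best'))

# Join back to recover the full row(s) achieving each minimum.
query = (Reading
         .select(Reading)
         .join(best, on=((Reading.sensor_id == best.c.sensor_id) &
                         (Reading.distance == best.c.min_distance))))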
Code example #11
 def span(cls):
     return (
         cls.select(fn.MIN(cls.id)).scalar(),
         cls.select(fn.MAX(cls.id)).scalar(),
     )
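The two round trips can be merged into one query (a sketch against the same class):

 def span_single_query(cls):
     # One SELECT returning (MIN(id), MAX(id)) as a tuple.
     return cls.select(fn.MIN(cls.id), fn.MAX(cls.id)).scalar(as_tuple=True)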
Code example #12
def getMinEventTimestamp(clientID):
    query = (Event.select(fn.MIN(Event.timestamp).alias("timestamp")).where(
        (Event.client_id == clientID)).get())
    return query.timestamp.timestamp()
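If the client has no events, MIN(timestamp) comes back NULL, query.timestamp is None, and the trailing .timestamp() call raises AttributeError. A defensive variant (sketch):

def getMinEventTimestampSafe(clientID):
    ts = (Event
          .select(fn.MIN(Event.timestamp))
          .where(Event.client_id == clientID)
          .scalar())
    return ts.timestamp() if ts is not None else None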
Code example #13
    def info(self, event, user=None):

        if not user:
            user = event.author
        else:
            if not isinstance(user, DiscoUser):
                try:
                    user = self.state.guilds[event.guild.id].members[user].user
                except KeyError:
                    try:
                        user = self.state.users[user]
                    except KeyError:
                        try:
                            user = self.bot.client.api.users_get(user)
                        except APIException:
                            return event.msg.reply(
                                'User not found :eyes:').after(3).delete()

        self.client.api.channels_typing(event.channel.id)

        content = []
        content.append('**\u276F User Information**')
        content.append('Profile: <@{}>'.format(user.id))

        created_dt = to_datetime(user.id)
        content.append('Created: {} ({})'.format(
            humanize.naturaltime(datetime.utcnow() - created_dt),
            created_dt.strftime("%b %d %Y %H:%M:%S")))

        member = event.guild.get_member(user.id) if event.guild else None

        if user.presence:  #I couldn't get this to work w/o it lol
            emoji, status = get_status_emoji(user.presence)
            content.append('Status: <{}> {}'.format(emoji, status))
            if user.presence.game and user.presence.game.name:
                if user.presence.game.type == ActivityTypes.DEFAULT:
                    content.append('{}'.format(user.presence.game.name))
                if user.presence.game.type == ActivityTypes.CUSTOM:
                    content.append('Custom Status: {}'.format(
                        user.presence.game.state))
                if user.presence.game.type == ActivityTypes.LISTENING:
                    # In the embed, details is the song name.
                    content.append('Listening to {} on Spotify'.format(
                        user.presence.game.details))
                if user.presence.game.type == ActivityTypes.STREAMING:
                    content.append('Streaming: [{}]({})'.format(
                        user.presence.game.name, user.presence.game.url))

        if user.public_flags:
            badges = ''
            user_badges = list(UserFlags(user.public_flags))
            for badge in user_badges:
                badges += '<{}> '.format(BADGE_EMOJI[badge])

            content.append('Badges: {}'.format(badges))

        if member:
            content.append('\n**\u276F Member Information**')

            if member.nick:
                content.append('Nickname: {}'.format(member.nick))

            content.append('Joined: {} ago ({})'.format(
                humanize.naturaldelta(datetime.utcnow() - member.joined_at),
                member.joined_at.strftime("%b %d %Y %H:%M:%S"),
            ))

            if member.roles:
                content.append('Roles: {}'.format(', '.join(
                    ('<@&{}>'.format(member.guild.roles.get(r).id)
                     for r in member.roles))))

        # Execute a bunch of queries
        newest_msg = Message.select(fn.MAX(Message.id)).where(
            (Message.author_id == user.id)
            & (Message.guild_id == event.guild.id)).tuples()[0][0]

        oldest_msg = Message.select(fn.MIN(Message.id)).where(
            (Message.author_id == user.id)
            & (Message.guild_id == event.guild.id)).tuples()[0][0]  #Slow Query

        voice = GuildVoiceSession.select(
            fn.COUNT(GuildVoiceSession.user_id),
            fn.SUM(GuildVoiceSession.ended_at - GuildVoiceSession.started_at)
        ).where((GuildVoiceSession.user_id == user.id)
                & (~(GuildVoiceSession.ended_at >> None))
                & (GuildVoiceSession.guild_id == event.guild.id)).tuples()[0]

        infractions = Infraction.select(Infraction.id).where(
            (Infraction.user_id == user.id)
            & (Infraction.guild_id == event.guild.id)).tuples()

        if newest_msg and oldest_msg:
            content.append('\n **\u276F Activity**')
            content.append('Last Message: {} ({})'.format(
                humanize.naturaltime(datetime.utcnow() -
                                     to_datetime(newest_msg)),
                to_datetime(newest_msg).strftime("%b %d %Y %H:%M:%S"),
            ))
            content.append('First Message: {} ({})'.format(
                humanize.naturaltime(datetime.utcnow() -
                                     to_datetime(oldest_msg)),
                to_datetime(oldest_msg).strftime("%b %d %Y %H:%M:%S"),
            ))

        if len(infractions) > 0:
            content.append('\n**\u276F Infractions**')
            total = len(infractions)
            content.append('Total Infractions: **{:,}**'.format(total))

        if voice[0]:
            content.append('\n**\u276F Voice**')
            content.append('Sessions: `{:,}`'.format(voice[0]))
            content.append('Time: `{}`'.format(
                str(humanize.naturaldelta(voice[1])).title()))

        embed = MessageEmbed()

        try:
            avatar = User.with_id(user.id).get_avatar_url()
        except Exception:
            # The lookup fails if the user has never been seen by speedboat.
            avatar = user.get_avatar_url()

        embed.set_author(name='{}#{} ({})'.format(
            user.username,
            user.discriminator,
            user.id,
        ),
                         icon_url=avatar)

        embed.set_thumbnail(url=avatar)

        embed.description = '\n'.join(content)
        embed.color = get_dominant_colors_user(user, avatar)
        event.msg.reply('', embed=embed)
Code example #14
def process_submissions(appcfg):
    # Get newest submissions with two week overlap
    print('   PROCESSING NEWEST PUSHSHIFT.IO SUBMISSIONS FOR',
          appcfg.subreddit)

    try:
        newest_utc = int(
            Submission.select(fn.MAX(
                Submission.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        newest_utc = None
    if newest_utc is not None:
        oldestdate = newest_utc  # - 1209600  # two weeks overlap, in seconds
    else:
        oldestdate = appcfg.oldestdate

    try:
        post_id_set = get_push_submissions(appcfg, appcfg.newestdate,
                                           oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        post_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return post_id_set

    # Get oldest submissions in case progress was interrupted, with four week overlap
    try:
        oldest_utc = int(
            Submission.select(fn.MIN(
                Submission.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        oldest_utc = None
    if oldest_utc is not None:
        newestdate = oldest_utc  # + 2400000  # four week overlap, in seconds
    else:
        newestdate = appcfg.newestdate
    print('   PROCESSING OLDEST PUSHSHIFT.IO SUBMISSIONS FOR',
          appcfg.subreddit)

    try:
        old_post_id_set = get_push_submissions(appcfg, newestdate,
                                               appcfg.oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        old_post_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return old_post_id_set

    post_id_set |= old_post_id_set
    filedate = arrow.now().timestamp  # a property in arrow < 1.0; newer arrow uses .int_timestamp
    output_file_path = "{subreddit}_{timestamp}.csv".format(
        subreddit=appcfg.subreddit, timestamp=filedate)

    # with open(output_file_path, 'w', encoding='UTF-8') as post_file:
    #     post_file.writelines(post_id_set)

    print("     Total posts submitted to", appcfg.subreddit, "in set:",
          len(post_id_set))
    deleted = Author.get_or_none(name='[deleted]')
    if deleted is not None:
        supdatet = Submission.update(
            deleted=True).where((Submission.author == deleted.id)
                                # `|`, not `or`: `or` drops the second condition.
                                & (Submission.deleted.is_null()
                                   | (Submission.deleted == 0))).execute()
        print(
            '     Updated deleted field in submissions.  Set deleted = True for ',
            supdatet, ' records.')
        supdatef = Submission.update(
            deleted=False).where((Submission.author != deleted.id)
                                 & (Submission.deleted.is_null())).execute()
        print(
            '     Updated deleted field in submissions.  Set deleted = False for ',
            supdatef, ' records.')
Code example #15
def get_earliest_datetime():
    '''Finds the minimum datetime in the orders database'''
    earliestDatetime = Tabs.select(fn.MIN(Tabs.timestamp)).scalar(convert=True)
    return earliestDatetime
Code example #16
File: reports.py Project: tenet-ac-za/NZ-ORCID-Hub
def user_summary():  # noqa: D103

    form = DateRangeForm(request.args)
    sort = request.args.get("sort")
    if sort:
        try:
            sort = int(sort)
        except ValueError:
            sort = None
    desc = request.args.get("desc")
    if desc:
        try:
            desc = int(desc)
        except ValueError:
            desc = None

    if not (form.from_date.data and form.to_date.data):
        date_range = User.select(
            fn.MIN(User.created_at).alias('from_date'),
            fn.MAX(User.created_at).alias('to_date')).first()
        return redirect(
            url_for(
                "user_summary",
                from_date=date_range.from_date.date().isoformat(),
                to_date=date_range.to_date.date().isoformat(),
                sort=sort, desc=desc))

    user_counts = (User.select(
        User.organisation.alias("org_id"),
        fn.COUNT(fn.DISTINCT(User.id)).alias("user_count")).where(
            User.created_at.between(form.from_date.data, form.to_date.data)).join(
                UserOrg, JOIN.LEFT_OUTER, on=(UserOrg.org_id == User.id)).group_by(
                    User.organisation)).alias("user_counts")

    linked_counts = (OrcidToken.select(
        OrcidToken.org.alias("org_id"),
        fn.COUNT(fn.DISTINCT(OrcidToken.user)).alias("linked_user_count")).where(
            OrcidToken.created_at.between(form.from_date.data, form.to_date.data)).group_by(
                OrcidToken.org).alias("linked_counts"))

    query = (Organisation.select(
        Organisation.name,
        fn.COALESCE(user_counts.c.user_count, 0).alias("user_count"),
        fn.COALESCE(linked_counts.c.linked_user_count, 0).alias("linked_user_count")).join(
            user_counts, on=(Organisation.id == user_counts.c.org_id)).join(
                linked_counts, JOIN.LEFT_OUTER,
                on=(Organisation.id == linked_counts.c.org_id)))

    if sort == 1:
        order_fields = [SQL("user_count"), SQL("linked_user_count"), ]
    else:
        order_fields = [Organisation.name, ]
    if desc:
        order_fields = [f.desc() for f in order_fields]
    query = query.order_by(*order_fields)

    total_user_count = sum(r.user_count for r in query if r.user_count)
    total_linked_user_count = sum(r.linked_user_count for r in query if r.linked_user_count)

    headers = [(h,
                url_for(
                    "user_summary",
                    from_date=form.from_date.data,
                    to_date=form.to_date.data,
                    sort=i,
                    desc=1 if sort == i and not desc else 0))
               for i, h in enumerate(["Name", "Linked User Count / User Count (%)"])]

    return render_template(
        "user_summary.html",
        form=form,
        query=query,
        total_user_count=total_user_count,
        total_linked_user_count=total_linked_user_count,
        sort=sort, desc=desc,
        headers=headers)
Code example #17
File: utils.py Project: micasa001/selective_dca_bot
def open_positions_report():
    markets = [
        lp.market
        for lp in LongPosition.select(LongPosition.market).distinct()
    ]

    results = []
    result_str = "Open Positions:\n"
    total_net = Decimal('0.0')
    total_spent = Decimal('0.0')
    for market in markets:
        current_price = Candle.select().where(
            Candle.market == market).order_by(
                Candle.timestamp.desc()).limit(1)[0].close

        # Named to avoid shadowing the min/avg/max builtins.
        (num_positions, quantity, spent, min_price, avg_price, max_price,
         min_sell_price) = LongPosition.select(
             fn.COUNT(LongPosition.id), fn.SUM(LongPosition.buy_quantity),
             fn.SUM(LongPosition.buy_quantity * LongPosition.purchase_price),
             fn.MIN(LongPosition.purchase_price),
             fn.AVG(LongPosition.purchase_price),
             fn.MAX(LongPosition.purchase_price),
             fn.MIN(LongPosition.sell_price)).where(
                 LongPosition.market == market,
                 LongPosition.sell_timestamp.is_null(True)).scalar(
                     as_tuple=True)

        if not num_positions:
            continue

        quantity = Decimal(quantity).quantize(Decimal('0.00000001'))
        spent = Decimal(spent)

        current_value = quantity * current_price

        profit = (current_value - spent).quantize(Decimal('0.00000001'))
        total_net += profit
        total_spent += spent
        current_profit_percentage = (current_value /
                                     spent * Decimal('100.0')).quantize(
                                         Decimal('0.01'))

        results.append({
            "market": market,
            "num_positions": num_positions,
            "min_position": min_price.quantize(Decimal('0.00000001')),
            "avg_position": avg_price.quantize(Decimal('0.00000001')),
            "max_position": max_price.quantize(Decimal('0.00000001')),
            "min_sell_price": min_sell_price.quantize(Decimal('0.00000001')),
            "min_profit_percentage":
                (min_sell_price / min_price * Decimal('100.00')).quantize(
                    Decimal('0.01')),
            "profit": profit,
            "current_profit_percentage": current_profit_percentage,
            "quantity": quantity.normalize(),
        })

    if total_spent > Decimal('0.0'):
        total_percentage = (total_net / total_spent *
                            Decimal('100.0')).quantize(Decimal('0.01'))
    else:
        total_percentage = Decimal('0.0')
    for result in sorted(results, key=lambda i: i['profit'], reverse=True):
        result_str += (
            f"{result['market']:>8}: "
            f"{result['min_position']:0.8f} | "
            f"{result['min_sell_price']:0.8f} "
            f"({str(result['min_profit_percentage']):>6}%) | "
            f"{str(result['num_positions']):>2} | "
            f"{str(result['current_profit_percentage']):>6}%\n"
        )

    result_str += f"{'-' * 53}\n"
    result_str += f"   total: {str(total_net):>11} | {str(total_percentage):>6}%\n"

    return result_str