Example #1
    def span(cls):
        return (
            cls.select(fn.MIN(cls.id)).scalar(),
            cls.select(fn.MAX(cls.id)).scalar(),
        )
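The `span` classmethod above returns the lowest and highest primary-key values as a tuple; `scalar()` collapses each single-aggregate query to a bare Python value (or `None` on an empty table). A minimal runnable sketch; the `Item` model and the in-memory SQLite database are assumptions added for illustration, not part of the original snippet:

from peewee import Model, CharField, SqliteDatabase, fn

db = SqliteDatabase(':memory:')  # throwaway database for the demo


class Item(Model):
    name = CharField()  # peewee adds an auto-increment `id` implicitly

    class Meta:
        database = db

    @classmethod
    def span(cls):
        # One aggregate query per bound; both return None on an empty table.
        return (
            cls.select(fn.MIN(cls.id)).scalar(),
            cls.select(fn.MAX(cls.id)).scalar(),
        )


db.create_tables([Item])
Item.insert_many([{'name': n} for n in 'abc']).execute()
print(Item.span())  # (1, 3)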
Example #2
    def max(cls):
        """Get the maximum integer id used in this table.

        Guaranteed to return an integer (i.e. never ``None``)."""
        return cls.select(fn.MAX(cls.id)).scalar() or 0
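The `or 0` fallback is what makes the docstring's guarantee hold: `MAX()` over an empty table yields SQL NULL, which `scalar()` returns as `None`. A two-line illustration, assuming some hypothetical peewee model `M` with an auto-increment `id` and no rows yet:

print(M.select(fn.MAX(M.id)).scalar())       # None: MAX() over zero rows is NULL
print(M.select(fn.MAX(M.id)).scalar() or 0)  # 0, so callers always get an int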
Example #3
    def get_stats(cls):
        query = (Pokemon
                 .select(Pokemon.pokemon_id,
                         fn.COUNT(Pokemon.pokemon_id).alias('count'),
                         fn.MAX(Pokemon.disappear_time).alias('lastseen'))
                 .group_by(Pokemon.pokemon_id)
                 .order_by(-SQL('count'))
                 .dicts())

        pokemons = list(query)

        known_pokemon = set(p['pokemon_id'] for p in pokemons)
        unknown_pokemon = set(range(1, 151)).difference(known_pokemon)
        pokemons.extend({'pokemon_id': i, 'count': 0, 'lastseen': None}
                        for i in unknown_pokemon)

        for p in pokemons:
            p['pokemon_name'] = get_pokemon_name(p['pokemon_id'])
        return pokemons
Example #4
    def get(self):
        start = request.args.get('start') or 0
        length = request.args.get('length') or -1
        radarrid = request.args.get('radarrid')

        upgradable_movies = []
        upgradable_movies_not_perfect = []
        if settings.general.getboolean('upgrade_subs'):
            days_to_upgrade_subs = settings.general.days_to_upgrade_subs
            minimum_timestamp = (
                (datetime.datetime.now() -
                 timedelta(days=int(days_to_upgrade_subs))) -
                datetime.datetime(1970, 1, 1)).total_seconds()

            if settings.general.getboolean('upgrade_manual'):
                query_actions = [1, 2, 3, 6]
            else:
                query_actions = [1, 3]

            upgradable_movies_conditions = [
                (TableHistoryMovie.action.in_(query_actions)),
                (TableHistoryMovie.timestamp > minimum_timestamp),
                (TableHistoryMovie.score.is_null(False))
            ]
            upgradable_movies_conditions += get_exclusion_clause('movie')
            upgradable_movies = TableHistoryMovie.select(TableHistoryMovie.video_path,
                                                         fn.MAX(TableHistoryMovie.timestamp).alias('timestamp'),
                                                         TableHistoryMovie.score,
                                                         TableMovies.tags,
                                                         TableMovies.monitored)\
                .join(TableMovies, on=(TableHistoryMovie.radarrId == TableMovies.radarrId))\
                .where(reduce(operator.and_, upgradable_movies_conditions))\
                .group_by(TableHistoryMovie.video_path)\
                .dicts()
            upgradable_movies = list(upgradable_movies)

            for upgradable_movie in upgradable_movies:
                if upgradable_movie['timestamp'] > minimum_timestamp:
                    try:
                        int(upgradable_movie['score'])
                    except ValueError:
                        pass
                    else:
                        if int(upgradable_movie['score']) < 120:
                            upgradable_movies_not_perfect.append(
                                upgradable_movie)

        query_conditions = [(TableMovies.title.is_null(False))]
        if radarrid:
            query_conditions.append((TableMovies.radarrId == radarrid))
        query_condition = reduce(operator.and_, query_conditions)

        movie_history = TableHistoryMovie.select(TableHistoryMovie.id,
                                                 TableHistoryMovie.action,
                                                 TableMovies.title,
                                                 TableHistoryMovie.timestamp,
                                                 TableHistoryMovie.description,
                                                 TableHistoryMovie.radarrId,
                                                 TableMovies.monitored,
                                                 TableHistoryMovie.video_path.alias('path'),
                                                 TableHistoryMovie.language,
                                                 TableMovies.tags,
                                                 TableHistoryMovie.score,
                                                 TableHistoryMovie.subs_id,
                                                 TableHistoryMovie.provider,
                                                 TableHistoryMovie.subtitles_path)\
            .join(TableMovies, on=(TableHistoryMovie.radarrId == TableMovies.radarrId))\
            .where(query_condition)\
            .order_by(TableHistoryMovie.timestamp.desc())\
            .limit(length)\
            .offset(start)\
            .dicts()
        movie_history = list(movie_history)

        blacklist_db = TableBlacklistMovie.select(
            TableBlacklistMovie.provider, TableBlacklistMovie.subs_id).dicts()
        blacklist_db = list(blacklist_db)

        for item in movie_history:
            # Mark movies as upgradable or not
            item.update({"upgradable": False})
            if {
                    "video_path": str(item['path']),
                    "timestamp": float(item['timestamp']),
                    "score": str(item['score']),
                    "tags": str(item['tags']),
                    "monitored": str(item['monitored'])
            } in upgradable_movies_not_perfect:  # noqa: E129
                if os.path.isfile(
                        path_mappings.path_replace_movie(
                            item['subtitles_path'])):
                    item.update({"upgradable": True})

            del item['path']

            postprocessMovie(item)

            if item['score']:
                item['score'] = str(round(
                    (int(item['score']) * 100 / 120), 2)) + "%"

            # Make timestamp pretty
            if item['timestamp']:
                item["raw_timestamp"] = int(item['timestamp'])
                item["parsed_timestamp"] = datetime.datetime.fromtimestamp(
                    int(item['timestamp'])).strftime('%x %X')
                item['timestamp'] = pretty.date(item["raw_timestamp"])

            # Check if subtitles is blacklisted
            item.update({"blacklisted": False})
            if item['action'] not in [0, 4, 5]:
                for blacklisted_item in blacklist_db:
                    if blacklisted_item['provider'] == item[
                            'provider'] and blacklisted_item[
                                'subs_id'] == item['subs_id']:  # noqa: E125
                        item.update({"blacklisted": True})
                        break

        count = TableHistoryMovie.select()\
            .join(TableMovies, on=(TableHistoryMovie.radarrId == TableMovies.radarrId))\
            .where(TableMovies.title.is_null(False))\
            .count()

        return jsonify(data=movie_history, total=count)
Example #5
def process_comments(appcfg):
    # Get newest comments with two week overlap
    print('   PROCESSING NEWEST PUSHSHIFT.IO COMMENTS FOR', appcfg.subreddit)

    try:
        newest_utc = int(
            Comment.select(fn.MAX(Comment.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        newest_utc = None
    if newest_utc is not None:
        oldestdate = newest_utc  # - 1209600  # two weeks overlap, in seconds
    else:
        oldestdate = appcfg.oldestdate

    try:
        comment_id_set = get_push_comments(appcfg, appcfg.newestdate,
                                           oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        comment_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return comment_id_set

    # Get oldest comments in case progress was interrupted, with two week overlap
    try:
        oldest_utc = int(
            Comment.select(fn.MIN(Comment.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        oldest_utc = None
    if oldest_utc is not None:
        newestdate = oldest_utc  # + 1209600  # two weeks overlap, in seconds
    else:
        newestdate = appcfg.newestdate
    print('   PROCESSING OLDEST PUSHSHIFT.IO COMMENTS FOR', appcfg.subreddit)

    try:
        old_comment_id_set = get_push_comments(appcfg, newestdate,
                                               appcfg.oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        old_comment_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return old_comment_id_set
    comment_id_set |= old_comment_id_set
    filedate = arrow.now().timestamp
    basedir = "/rpa" if os.environ.get('DOCKER', '0') == '1' else '.'
    coutput_file_path = "{basedir}/{subreddit}_comments_{timestamp}.txt".format(
        basedir=basedir, subreddit=appcfg.subreddit, timestamp=filedate)

    # with open(coutput_file_path, 'w', encoding='UTF-8') as comment_file:
    #     comment_file.writelines(comment_id_set)
    print("     Total comments submitted to", appcfg.subreddit, "in set:",
          len(comment_id_set))
    deleted = Author.get_or_none(name='[deleted]')
    if deleted is not None:
        cupdatet = Comment.update(deleted=True).where(
            (Comment.author == deleted.id)
            & (Comment.deleted.is_null()
               | (Comment.deleted == 0))).execute()
        print(
            '     Updated deleted field in comments.  Set deleted = True for',
            cupdatet, 'records.')
        cupdatef = Comment.update(
            deleted=False).where((Comment.author != deleted.id)
                                 & (Comment.deleted.is_null())).execute()
        print(
            '     Updated deleted field in comments.  Set deleted = False for',
            cupdatef, 'records.')
Example #6
def nextqueue_position(self):
    lastPos = Group.select(fn.MAX(Group.queue_position)).scalar()
    if lastPos is None:
        return 0
    else:
        return lastPos + 1
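The explicit `None` check above handles the empty-table case in Python. A sketch of an equivalent single-expression variant (same assumed `Group` model) that pushes the fallback into SQL via COALESCE; which reads better is a style call:

def next_queue_position():
    # COALESCE maps the NULL that MAX() yields on an empty table to -1,
    # so the first group gets position 0 and later ones get last + 1.
    return Group.select(
        fn.COALESCE(fn.MAX(Group.queue_position), -1) + 1).scalar()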
Example #7
    def with_last_voted(cls):
        """Queryset annotated with .last_voted"""
        from app.models import Vote
        return cls.select().annotate(Vote,
                                     fn.MAX(Vote.time).alias('last_voted'))
Example #8
def update_acm_rank(contest_id, user_id, problem_id, sub_create_time, result):
    with db.connection_context():
        record = acm_contest_rank.get_or_none(
            acm_contest_rank.contest_id == contest_id,
            acm_contest_rank.user_id == user_id
        )

    if record is None:
        with db.connection_context():
            max_id = (
                acm_contest_rank.select(
                    fn.MAX(acm_contest_rank.id).alias("max"))
                .get()
                .max
            )
        record = acm_contest_rank(
            # id=(max_id + 1 if  max_id else 1),
            user_id=user_id,
            contest_id=contest_id,
            submission_number=0,
            accepted_number=0,
            total_time=0,
            submission_info={},
        )
    # Nothing to do if this problem is already accepted for this user.
    if record.submission_info.get(str(problem_id), {}).get("is_ac", False):
        return

    record.submission_number += 1

    record.submission_info[str(problem_id)] = record.submission_info.get(
        str(problem_id), {})
    if result == 0:
        # Accepted: penalty time is the number of seconds between the
        # contest start and this submission.
        start_time = contest.select(contest.create_time).where(
            contest.id == contest_id).get().create_time
        elapsed = sub_create_time - start_time
        time = elapsed.seconds + elapsed.days * 24 * 60 * 60
        record.total_time += time
        record.accepted_number += 1
        record.submission_info[str(problem_id)]["is_ac"] = True
        record.submission_info[str(problem_id)]["ac_time"] = time
        with db.connection_context():
            cp = (
                contest_problem.select()
                .where(
                    contest_problem.contest_id == contest_id,
                    contest_problem.problem_id == problem_id,
                )
                .get()
            )
            if cp.first_ac_id is None:
                record.submission_info[str(problem_id)]["is_first_ac"] = True
                cp.first_ac_id = user_id
                cp.save()
    else:
        # Wrong submission: flat 20-minute penalty plus an error counter.
        record.total_time += 1200
        record.submission_info[str(problem_id)]["is_ac"] = False
        record.submission_info[str(problem_id)]["error_number"] = (
            record.submission_info[str(problem_id)].get("error_number", 0) + 1
        )

    with db.connection_context():
        return record.save()
Example #9
def get_bounty_max_block_number():
    return BountyEvent.select(fn.MAX(BountyEvent.block_number)).scalar()
Example #10
    def with_last_mined(cls):
        """Queryset annotated with .last_mined"""
        from app.models import Block
        return cls.select().annotate(Block,
                                     fn.MAX(Block.time).alias('last_mined'))
Example #11
def genesis():
    return db.Genesis.select(fn.MAX(db.Genesis.genesis)).get().max
Example #12
def last_msg():
    last_msg = db.Telegram.select(fn.MAX(db.Telegram.update_id)).get().max
    return last_msg if last_msg is not None else settings.TELEGRAM_OFFSET
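Examples #11 and #12 read the aggregate through `.get().max`, relying on peewee exposing an unaliased function result under its lower-cased name; `.scalar()` is shorthand for the same one-value read. A small equivalence sketch against the `Telegram` model from the snippet:

last_a = db.Telegram.select(fn.MAX(db.Telegram.update_id)).get().max
last_b = db.Telegram.select(fn.MAX(db.Telegram.update_id)).scalar()
assert last_a == last_b  # both issue a single SELECT MAX(update_id) query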
Example #13
def open_positions_report():
    markets = [
        lp.market
        for lp in LongPosition.select(LongPosition.market).distinct()
    ]

    results = []
    result_str = "Open Positions:\n"
    total_net = Decimal('0.0')
    total_spent = Decimal('0.0')
    for market in markets:
        current_price = Candle.select().where(
            Candle.market == market).order_by(
                Candle.timestamp.desc()).limit(1)[0].close

        (num_positions, quantity, spent, min_price, avg_price, max_price,
         min_sell_price) = LongPosition.select(
             fn.COUNT(LongPosition.id), fn.SUM(LongPosition.buy_quantity),
             fn.SUM(LongPosition.buy_quantity * LongPosition.purchase_price),
             fn.MIN(LongPosition.purchase_price),
             fn.AVG(LongPosition.purchase_price),
             fn.MAX(LongPosition.purchase_price),
             fn.MIN(LongPosition.sell_price)).where(
                 LongPosition.market == market,
                 LongPosition.sell_timestamp.is_null(True)).scalar(
                     as_tuple=True)

        if not num_positions:
            continue

        quantity = Decimal(quantity).quantize(Decimal('0.00000001'))
        spent = Decimal(spent)

        current_value = quantity * current_price

        profit = (current_value - spent).quantize(Decimal('0.00000001'))
        total_net += profit
        total_spent += spent
        current_profit_percentage = (current_value /
                                     spent * Decimal('100.0')).quantize(
                                         Decimal('0.01'))

        results.append({
            "market": market,
            "num_positions": num_positions,
            "min_position": min_price.quantize(Decimal('0.00000001')),
            "avg_position": avg_price.quantize(Decimal('0.00000001')),
            "max_position": max_price.quantize(Decimal('0.00000001')),
            "min_sell_price": min_sell_price.quantize(Decimal('0.00000001')),
            "min_profit_percentage":
                (min_sell_price / min_price * Decimal('100.00')).quantize(
                    Decimal('0.01')),
            "profit": profit,
            "current_profit_percentage": current_profit_percentage,
            "quantity": quantity.normalize()
        })

    if total_spent > Decimal('0.0'):
        total_percentage = (total_net / total_spent *
                            Decimal('100.0')).quantize(Decimal('0.01'))
    else:
        total_percentage = Decimal('0.0')
    for result in sorted(results, key=lambda i: i['profit'], reverse=True):
        result_str += f"{'{:>8}'.format(result['market'])}: {result['min_position']:0.8f} | {result['min_sell_price']:0.8f} ({'{:>6}'.format(str(result['min_profit_percentage']))}%) | {'{:>2}'.format(str(result['num_positions']))} | {'{:>6}'.format(str(result['current_profit_percentage']))}%\n"

    result_str += f"{'-' * 53}\n"
    result_str += f"   total: {'{:>11}'.format(str(total_net))} | {'{:>6}'.format(str(total_percentage))}%\n"

    return result_str
Example #14
    def info(self, event, user: User = None):
        if not user:
            user = event.author
        else:
            if not isinstance(user, DiscoUser):
                try:
                    user = self.state.guilds[event.guild.id].members[user].user
                except KeyError:
                    try:
                        user = self.state.users[user]
                    except KeyError:
                        try:
                            user = self.bot.client.api.users_get(user)
                        except APIException:
                            return event.msg.reply(
                                ':eyes: User not found').after(3).delete()

        self.client.api.channels_typing(event.channel.id)

        content = []
        content.append('**\u276F User Information**')
        content.append('Profile: <@{}>'.format(user.id))

        created_dt = to_datetime(user.id)
        content.append('Created: <t:{0}:R> (<t:{0}:f>)'.format(
            int(created_dt.replace(tzinfo=pytz.UTC).timestamp())))

        member = event.guild.get_member(user.id) if event.guild else None

        if user.public_flags:
            badges = ''
            user_badges = list(UserFlags(user.public_flags))
            for badge in user_badges:
                badges += '<{}> '.format(BADGE_EMOJI[badge])

            content.append('Badges: {}'.format(badges))

        if member:
            content.append('\n**\u276F Member Information**')

            if member.nick:
                content.append('Nickname: {}'.format(member.nick))

            content.append('Joined: <t:{0}:R> (<t:{0}:f>)'.format(
                int(member.joined_at.replace(tzinfo=pytz.UTC).timestamp())))

            content.append('Messages: {}'.format(
                int(Message.select(fn.Count(Message.id)).where(
                    (Message.author_id == user.id)
                    & (Message.guild_id == event.guild.id)).tuples()[0][0])))

            if member.roles:
                content.append('Roles: {}'.format(', '.join(
                    ('<@&{}>'.format(r) for r in member.roles))))

        # Execute a bunch of queries
        newest_msg = Message.select(fn.MAX(Message.id)).where(
            (Message.author_id == user.id)
            & (Message.guild_id == event.guild.id)).tuples()[0][0]

        infractions = Infraction.select(Infraction.id).where(
            (Infraction.user_id == user.id)
            & (Infraction.guild_id == event.guild.id)).tuples()

        if newest_msg:
            content.append('\n **\u276F Activity**')
            content.append('Last Message: <t:{0}:R> (<t:{0}:f>)'.format(
                int((to_datetime(newest_msg).replace(
                    tzinfo=pytz.UTC)).timestamp())))
            # content.append('First Message: {} ({})'.format(
            #    humanize.naturaltime(datetime.utcnow() - to_datetime(oldest_msg)),
            #    to_datetime(oldest_msg).strftime("%b %d %Y %H:%M:%S"),
            # ))

        if len(infractions) > 0:
            content.append('\n**\u276F Infractions**')
            total = len(infractions)
            content.append('Total Infractions: **{:,}**'.format(total))

        embed = MessageEmbed()

        try:
            # User.with_id fails if the user has never been seen by speedboat.
            avatar = User.with_id(user.id).get_avatar_url()
        except APIException:
            avatar = user.get_avatar_url()

        embed.set_author(name='{} ({})'.format(str(user), user.id),
                         icon_url=avatar)

        embed.set_thumbnail(url=avatar)

        embed.description = '\n'.join(content)
        embed.color = get_dominant_colors_user(user, avatar)
        event.msg.reply('', embed=embed)
Example #15
def get_last_datetime():
    '''Finds the maximum datetime in the orders database'''
    lastDatetime = Tabs.select(fn.MAX(Tabs.timestamp)).scalar(convert=True)
    return lastDatetime
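In peewee 2.x, `scalar(convert=True)` additionally runs the raw result through the field's Python converter, so a timestamp column comes back as a `datetime` rather than the driver's raw value. A sketch of the difference using the `Tabs` model from the snippet (the exact raw type depends on the database driver):

raw = Tabs.select(fn.MAX(Tabs.timestamp)).scalar()                 # e.g. a raw string
parsed = Tabs.select(fn.MAX(Tabs.timestamp)).scalar(convert=True)  # datetime object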
Example #16
def user_summary():  # noqa: D103

    form = DateRangeForm(request.args)
    sort = request.args.get("sort")
    if sort:
        try:
            sort = int(sort)
        except ValueError:
            sort = None
    desc = request.args.get("desc")
    if desc:
        try:
            desc = int(desc)
        except ValueError:
            desc = None

    if not (form.from_date.data and form.to_date.data):
        date_range = User.select(
            fn.MIN(User.created_at).alias('from_date'),
            fn.MAX(User.created_at).alias('to_date')).first()
        return redirect(
            url_for(
                "user_summary",
                from_date=date_range.from_date.date().isoformat(),
                to_date=date_range.to_date.date().isoformat(),
                sort=sort, desc=desc))

    user_counts = (User.select(
        User.organisation.alias("org_id"),
        fn.COUNT(fn.DISTINCT(User.id)).alias("user_count")).where(
            User.created_at.between(form.from_date.data, form.to_date.data)).join(
                UserOrg, JOIN.LEFT_OUTER, on=(UserOrg.org_id == User.id)).group_by(
                    User.organisation)).alias("user_counts")

    linked_counts = (OrcidToken.select(
        OrcidToken.org.alias("org_id"),
        fn.COUNT(fn.DISTINCT(OrcidToken.user)).alias("linked_user_count")).where(
            OrcidToken.created_at.between(form.from_date.data, form.to_date.data)).group_by(
                OrcidToken.org).alias("linked_counts"))

    query = (Organisation.select(
        Organisation.name,
        fn.COALESCE(user_counts.c.user_count, 0).alias("user_count"),
        fn.COALESCE(linked_counts.c.linked_user_count, 0).alias("linked_user_count")).join(
            user_counts, on=(Organisation.id == user_counts.c.org_id)).join(
                linked_counts, JOIN.LEFT_OUTER,
                on=(Organisation.id == linked_counts.c.org_id)))

    if sort == 1:
        order_fields = [SQL("user_count"), SQL("linked_user_count"), ]
    else:
        order_fields = [Organisation.name, ]
    if desc:
        order_fields = [f.desc() for f in order_fields]
    query = query.order_by(*order_fields)

    total_user_count = sum(r.user_count for r in query if r.user_count)
    total_linked_user_count = sum(r.linked_user_count for r in query if r.linked_user_count)

    headers = [(h,
                url_for(
                    "user_summary",
                    from_date=form.from_date.data,
                    to_date=form.to_date.data,
                    sort=i,
                    desc=1 if sort == i and not desc else 0))
               for i, h in enumerate(["Name", "Linked User Count / User Count (%)"])]

    return render_template(
        "user_summary.html",
        form=form,
        query=query,
        total_user_count=total_user_count,
        total_linked_user_count=total_linked_user_count,
        sort=sort, desc=desc,
        headers=headers)
Example #17
def process_submissions(appcfg):
    # Get newest submissions with two week overlap
    print('   PROCESSING NEWEST PUSHSHIFT.IO SUBMISSIONS FOR',
          appcfg.subreddit)

    try:
        newest_utc = int(
            Submission.select(fn.MAX(
                Submission.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        newest_utc = None
    if newest_utc is not None:
        oldestdate = newest_utc  # - 1209600  # two weeks overlap, in seconds
    else:
        oldestdate = appcfg.oldestdate

    try:
        post_id_set = get_push_submissions(appcfg, appcfg.newestdate,
                                           oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        post_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return post_id_set

    # Get oldest submissions in case progress was interrupted, with four week overlap
    try:
        oldest_utc = int(
            Submission.select(fn.MIN(
                Submission.created_utc)).scalar().timestamp())
    except (TypeError, AttributeError):
        oldest_utc = None
    if oldest_utc is not None:
        newestdate = oldest_utc  # + 2400000  # four week overlap, in seconds
    else:
        newestdate = appcfg.newestdate
    print('   PROCESSING OLDEST PUSHSHIFT.IO SUBMISSIONS FOR',
          appcfg.subreddit)

    try:
        old_post_id_set = get_push_submissions(appcfg, newestdate,
                                               appcfg.oldestdate)
    except (ConnectionError, SSLError, ChunkedEncodingError):
        old_post_id_set = None
        print("     Connection Error for Pushshift API.  Quitting...")
        # quit()
        return old_post_id_set

    post_id_set |= old_post_id_set
    filedate = arrow.now().timestamp
    output_file_path = "{subreddit}_{timestamp}.csv".format(
        subreddit=appcfg.subreddit, timestamp=filedate)

    # with open(output_file_path, 'w', encoding='UTF-8') as post_file:
    #     post_file.writelines(post_id_set)

    print("     Total posts submitted to", appcfg.subreddit, "in set:",
          len(post_id_set))
    deleted = Author.get_or_none(name='[deleted]')
    if deleted is not None:
        supdatet = Submission.update(
            deleted=True).where((Submission.author == deleted.id)
                                & (Submission.deleted.is_null()
                                   | (Submission.deleted == 0))).execute()
        print(
            '     Updated deleted field in submissions.  Set deleted = True for ',
            supdatet, ' records.')
        supdatef = Submission.update(
            deleted=False).where((Submission.author != deleted.id)
                                 & (Submission.deleted.is_null())).execute()
        print(
            '     Updated deleted field in submissions.  Set deleted = False for ',
            supdatef, ' records.')
Example #18
    def write(self, path, buf, offset, fh):
        print('Write:')
        print('  offset: ' + str(offset))
        print('  bytes: ' + str(len(buf)))
        if path not in self.tempfiles:
            self.tempfiles[path] = {
                'file': tempfile.SpooledTemporaryFile(max_size=50 * (1024**2))
            }
        self.tempfiles[path]['lastwrite'] = datetime.datetime.now()
        self.tempfiles[path]['file'].seek(offset)
        print(self.tempfiles[path]['file'].write(buf))
        return
        # NOTE: the chunked Telegram-upload implementation below is
        # unreachable because of this early return; kept for reference.

        print('Write:')
        print('  offset: ' + str(offset))
        print('  bytes: ' + str(len(buf)))
        file_id = self._get_path_ids(path)[-1]
        f = File.get(id=file_id)

        nof_chunks = int(len(buf) / self._tgchunk_size) + (
            1 if len(buf) % self._tgchunk_size else 0)
        first_chunk_start_position = offset - (offset % self._tgchunk_size)
        first_chunk_no = int(first_chunk_start_position / self._tgchunk_size)

        chunks_streams = []
        for x in range(first_chunk_no, first_chunk_no + nof_chunks):
            tgd, created = TelegramDocument.get_or_create(file_id=f.id,
                                                          file_no=x)
            if created:
                chunks_streams.append(None)
            else:
                chunks_streams.append(self._get_file(tgd.telegram_id))

        for x in range(first_chunk_no, first_chunk_no + nof_chunks):
            chunk_stream = chunks_streams.pop(0)
            chunk_buf = tempfile.SpooledTemporaryFile(max_size=1024**3)
            chunk_buf.seek(0)
            if chunk_stream is not None:
                chunk_buf.write(chunk_stream.read())

            if x == 0:
                start_chunk = offset % self._tgchunk_size
                last_chunk = self._tgchunk_size
                start_buf = start_chunk + (first_chunk_no * self._tgchunk_size)
                last_buf = (first_chunk_no + 1) * self._tgchunk_size
            elif x == (first_chunk_no + nof_chunks - 1):
                start_chunk = 0
                last_chunk = self._tgchunk_size
                start_buf = x * self._tgchunk_size
                last_buf = ((x + 1) * self._tgchunk_size) - 1
            else:
                start_chunk = 0
                last_chunk = (offset + len(buf)) % self._tgchunk_size
                start_buf = x * self._tgchunk_size
                last_buf = start_buf + last_chunk

            chunk_buf.seek(start_chunk)
            print('  bytes to write (' + str(start_buf) + ':' + str(last_buf) +
                  '): ' + str(len(buf[start_buf:last_buf])))
            bytes_written = chunk_buf.write(buf[start_buf:last_buf])
            print('  bytes written: ' + str(bytes_written))
            # chunk_buf[start_chunk:last_chunk] = buf[start_buf:last_buf]
            tg_doc_id = self._upload_file(chunk_buf)
            tgd.telegram_id = tg_doc_id
            tgd.save()

        last_tgd = TelegramDocument.select(
            fn.MAX(TelegramDocument.file_no), TelegramDocument.id,
            TelegramDocument.telegram_id, TelegramDocument.file_no).where(
                TelegramDocument.file_id == f.id).first()
        file_size = last_tgd.file_no * self._tgchunk_size
        file_size += int(
            self._get_file(last_tgd.telegram_id).headers['Content-length'])
        print('Last file size: ' +
              self._get_file(last_tgd.telegram_id).headers['Content-length'])

        f.updated_at = datetime.datetime.now()
        f.size = file_size
        f.save()
        print('\n')
        return f.size
Example #19
    def get_highest_number(cls, course: Course):
        return (cls.select(fn.MAX(cls.number)).where(
            cls.course == course).group_by(cls.course).scalar())
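Since the `where` clause already pins the query to a single course, the `group_by` is redundant; a sketch without it returns the same value, including `None` when the course has no rows:

    def get_highest_number(cls, course: Course):
        # MAX() over the filtered rows alone; no grouping needed.
        return cls.select(fn.MAX(cls.number)).where(
            cls.course == course).scalar()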
Example #20
def getProcessedFile():
    rows = (Mad_ProcessedFile
            .select(Mad_ProcessedFile.name)
            .where(Mad_ProcessedFile.id ==
                   Mad_ProcessedFile.select(fn.MAX(Mad_ProcessedFile.id)).scalar())
            .dicts())  # pylint: disable=E1120 # noqa
    try:
        return rows[0]['name']
    except IndexError:
        return minstr()
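The nested `fn.MAX(...)` subquery above picks the row with the highest `id`. An equivalent formulation (a sketch; same model and `minstr()` fallback as the snippet) orders by `id` descending and takes the first row, avoiding the subquery:

def getProcessedFileLatest():
    rows = (Mad_ProcessedFile
            .select(Mad_ProcessedFile.name)
            .order_by(Mad_ProcessedFile.id.desc())
            .limit(1)
            .dicts())
    try:
        return rows[0]['name']
    except IndexError:
        return minstr()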
Example #21
    def info(self, event, user=None):

        if not user:
            user = event.author
        else:
            if not isinstance(user, DiscoUser):
                try:
                    user = self.state.guilds[event.guild.id].members[user].user
                except KeyError:
                    try:
                        user = self.state.users[user]
                    except KeyError:
                        try:
                            user = self.bot.client.api.users_get(user)
                        except APIException:
                            return event.msg.reply(
                                'User not found :eyes:').after(3).delete()

        self.client.api.channels_typing(event.channel.id)

        content = []
        content.append('**\u276F User Information**')
        content.append('Profile: <@{}>'.format(user.id))

        created_dt = to_datetime(user.id)
        content.append('Created: {} ({})'.format(
            humanize.naturaltime(datetime.utcnow() - created_dt),
            created_dt.strftime("%b %d %Y %H:%M:%S")))

        member = event.guild.get_member(user.id) if event.guild else None

        if user.presence:  # Presence data is not always populated.
            emoji, status = get_status_emoji(user.presence)
            content.append('Status: <{}> {}'.format(emoji, status))
            if user.presence.game and user.presence.game.name:
                if user.presence.game.type == ActivityTypes.DEFAULT:
                    content.append('{}'.format(user.presence.game.name))
                if user.presence.game.type == ActivityTypes.CUSTOM:
                    content.append('Custom Status: {}'.format(
                        user.presence.game.state))
                if user.presence.game.type == ActivityTypes.LISTENING:
                    # In the embed, details is the song name.
                    content.append('Listening to {} on Spotify'.format(
                        user.presence.game.details))
                if user.presence.game.type == ActivityTypes.STREAMING:
                    content.append('Streaming: [{}]({})'.format(
                        user.presence.game.name, user.presence.game.url))

        if user.public_flags:
            badges = ''
            user_badges = list(UserFlags(user.public_flags))
            for badge in user_badges:
                badges += '<{}> '.format(BADGE_EMOJI[badge])

            content.append('Badges: {}'.format(badges))

        if member:
            content.append('\n**\u276F Member Information**')

            if member.nick:
                content.append('Nickname: {}'.format(member.nick))

            content.append('Joined: {} ago ({})'.format(
                humanize.naturaldelta(datetime.utcnow() - member.joined_at),
                member.joined_at.strftime("%b %d %Y %H:%M:%S"),
            ))

            if member.roles:
                content.append('Roles: {}'.format(', '.join(
                    ('<@&{}>'.format(member.guild.roles.get(r).id)
                     for r in member.roles))))

        # Execute a bunch of queries
        newest_msg = Message.select(fn.MAX(Message.id)).where(
            (Message.author_id == user.id)
            & (Message.guild_id == event.guild.id)).tuples()[0][0]

        oldest_msg = Message.select(fn.MIN(Message.id)).where(
            (Message.author_id == user.id)
            & (Message.guild_id == event.guild.id)).tuples()[0][0]  #Slow Query

        voice = GuildVoiceSession.select(
            fn.COUNT(GuildVoiceSession.user_id),
            fn.SUM(GuildVoiceSession.ended_at - GuildVoiceSession.started_at)
        ).where((GuildVoiceSession.user_id == user.id)
                & (~(GuildVoiceSession.ended_at >> None))
                & (GuildVoiceSession.guild_id == event.guild.id)).tuples()[0]

        infractions = Infraction.select(Infraction.id).where(
            (Infraction.user_id == user.id)
            & (Infraction.guild_id == event.guild.id)).tuples()

        if newest_msg and oldest_msg:
            content.append('\n **\u276F Activity**')
            content.append('Last Message: {} ({})'.format(
                humanize.naturaltime(datetime.utcnow() -
                                     to_datetime(newest_msg)),
                to_datetime(newest_msg).strftime("%b %d %Y %H:%M:%S"),
            ))
            content.append('First Message: {} ({})'.format(
                humanize.naturaltime(datetime.utcnow() -
                                     to_datetime(oldest_msg)),
                to_datetime(oldest_msg).strftime("%b %d %Y %H:%M:%S"),
            ))

        if len(infractions) > 0:
            content.append('\n**\u276F Infractions**')
            total = len(infractions)
            content.append('Total Infractions: **{:,}**'.format(total))

        if voice[0]:
            content.append('\n**\u276F Voice**')
            content.append('Sessions: `{:,}`'.format(voice[0]))
            content.append('Time: `{}`'.format(
                str(humanize.naturaldelta(voice[1])).title()))

        embed = MessageEmbed()

        try:
            # User.with_id fails if the user has never been seen by speedboat.
            avatar = User.with_id(user.id).get_avatar_url()
        except APIException:
            avatar = user.get_avatar_url()

        embed.set_author(name='{}#{} ({})'.format(user.username,
                                                  user.discriminator,
                                                  user.id),
                         icon_url=avatar)

        embed.set_thumbnail(url=avatar)

        embed.description = '\n'.join(content)
        embed.color = get_dominant_colors_user(user, avatar)
        event.msg.reply('', embed=embed)
Example #22
    def _parse_table_to_DB(table_contents) -> None:
        """Parse the downloaded timetable into the database.

        Parameters
        ----------
        table_contents: bytes
            raw bytes of the downloaded spreadsheet
        """
        book = open_workbook(file_contents=table_contents)
        sheet = book.sheet_by_index(0)
        num_cols = sheet.ncols
        # Check if several columns in the beginning are duplicating
        start_offset = 0
        first_col = sheet.col_slice(start_offset, 0, 4)
        for col in range(1, num_cols):
            curr_col = sheet.col_slice(col, 0, 4)
            if all(first_col[i].value != curr_col[i].value
                   for i in range(len(first_col))):
                start_offset = col - 1
                break
        group_count = -1
        day_count = -1
        print_status_in = max(1, num_cols // 15)  # at least 1: avoids modulo-by-zero on narrow sheets

        existing_records = [{
            "day_id": None,
            "subject_id": None
        }, {
            "day_id": None,
            "subject_id": None
        }]

        group_max_count = Weeks.select(fn.MAX(Weeks.days_of_group_id)).scalar()
        if group_max_count is not None and group_max_count > 0:
            group_count = group_max_count
        day_max_count = Days.select(fn.MAX(
            Days.subject_schedules_of_day_id)).scalar()
        if day_max_count is not None and day_max_count > 0:
            day_count = day_max_count

        for col_index in range(num_cols):
            if col_index % print_status_in == 0:
                print(Fore.YELLOW +
                      f"Progress: {int((col_index / num_cols) * 100)}%" +
                      Style.RESET_ALL)
            group_cell = str(sheet.cell(1, col_index).value)
            if search(r'.{4}-\d{2}-\d{2}', group_cell):
                group_cell = findall(r'.{4}-\d{2}-\d{2}', group_cell)[0]
            else:
                continue

            for day in range(6):
                for lesson in range(6):
                    for evenness in range(2):
                        lesson_number = int(
                            sheet.cell(3 + lesson * 2 + day * 12,
                                       start_offset + 1).value)
                        if not Parser._lesson_start_end_table_filled:
                            lesson_start_time = str(
                                sheet.cell(3 + lesson * 2 + day * 12,
                                           start_offset + 2).value)
                            lesson_end_time = str(
                                sheet.cell(3 + lesson * 2 + day * 12,
                                           start_offset + 3).value)
                            lesson_start_time = time(
                                *map(int, lesson_start_time.split('-')))
                            lesson_end_time = time(
                                *map(int, lesson_end_time.split('-')))
                            Parser._fill_lesson_start_end_table(
                                lesson_number, lesson_start_time,
                                lesson_end_time)

                        subject = Parser._get_cell_info(
                            3 + evenness + lesson * 2 + day * 12, col_index,
                            sheet).replace("\n", " ")
                        lesson_type = Parser._get_cell_info(
                            3 + evenness + lesson * 2 + day * 12,
                            col_index + 1, sheet)
                        lecturer = Parser._get_cell_info(
                            3 + evenness + lesson * 2 + day * 12,
                            col_index + 2, sheet).replace(",", ".")
                        classroom = Parser._get_cell_info(
                            3 + evenness + lesson * 2 + day * 12,
                            col_index + 3, sheet)
                        url = Parser._get_cell_info(
                            3 + evenness + lesson * 2 + day * 12,
                            col_index + 4, sheet)

                        day_of_week_id = (group_count + 1) \
                            if lesson == 0 and day == 0 else (group_count - int(not bool(evenness)))
                        schedule_of_subject_id = (day_count + 1) \
                            if lesson == 0 else (day_count - int(not bool(evenness)))

                        try:
                            day_id = Weeks.get((Weeks.group == group_cell) & (
                                Weeks.even == bool(evenness))).days_of_group_id
                            existing_records[evenness]["day_id"] = day_id
                        except DoesNotExist:
                            existing_records[evenness]["day_id"] = None

                        try:
                            subject_id = Days.get(
                                (Days.day_of_week_id ==
                                 existing_records[evenness]["day_id"])
                                & (Days.day_of_week == Parser._week_days[day])
                            ).subject_schedules_of_day_id
                            existing_records[evenness][
                                "subject_id"] = subject_id
                        except DoesNotExist:
                            existing_records[evenness]["subject_id"] = None

                        if not Subjects.select().where(
                                (Subjects.schedule_of_subject_id ==
                                 existing_records[evenness]["subject_id"])
                                & (Subjects.lesson_number ==
                                   lesson_number)).exists():
                            # print("Создаём " +
                            #       ', '.join([str(lesson_number), subject, lesson_type, lecturer, classroom, url])
                            #       + f" в subject_id {str(schedule_of_subject_id)}")
                            Subjects.create(
                                schedule_of_subject_id=schedule_of_subject_id,
                                lesson_number=lesson_number,
                                subject=subject,
                                lesson_type=lesson_type,
                                teacher=lecturer,
                                class_number=classroom,
                                link=url)
                        else:
                            s = Subjects.get(
                                (Subjects.schedule_of_subject_id ==
                                 existing_records[evenness]["subject_id"])
                                & (Subjects.lesson_number == lesson_number))
                            if s.subject != subject or s.lesson_type != lesson_type or s.teacher != lecturer or \
                                    s.class_number != classroom or s.link != url:
                                # print(f"Обновляем " +
                                #       ', '.join(
                                #           [str(lesson_number), subject, lesson_type, lecturer, classroom, url])
                                #       + " в subject_id {str(existing_records[evenness]['subject_id'])}")
                                s.subject = subject
                                s.lesson_type = lesson_type
                                s.teacher = lecturer
                                s.class_number = classroom
                                s.link = url
                                s.save()

                        if not Days.select().where(
                                (Days.day_of_week_id ==
                                 existing_records[evenness]["day_id"])
                                & (Days.day_of_week ==
                                   Parser._week_days[day])).exists():
                            if not Days.select().where(
                                    (Days.day_of_week_id == day_of_week_id)
                                    & (Days.day_of_week ==
                                       Parser._week_days[day])).exists():
                                Days.create(
                                    day_of_week_id=day_of_week_id,
                                    day_of_week=Parser._week_days[day],
                                    subject_schedules_of_day_id=day_count + 1)
                                day_count += 1

                        if not Weeks.select().where(
                                (Weeks.group == group_cell)
                                & (Weeks.even == bool(evenness))).exists():
                            Weeks.create(group=group_cell,
                                         even=bool(evenness),
                                         days_of_group_id=group_count + 1)
                            group_count += 1
Example #23
    def _execute(self) -> Tuple[List[PopularItemsOutput]]:
        """
        Execute the popular-items transaction
        :return: relevant output information
        """
        # Get order table joined with customer table
        order_customer_query = (Order.select(
            Order.id.alias("order_id"),
            Order.district_id.alias("district_id"),
            Order.warehouse_id.alias("warehouse_id"),
            Order.entry_date.alias("entry_date"),
            Customer.middle_name.alias("middle_name"),
            Customer.first_name.alias("first_name"),
            Customer.last_name.alias("last_name"),
        ).join(
            Customer,
            on=((Order.warehouse_id == Customer.warehouse_id)
                & (Order.district_id == Customer.district_id)
                & (Order.customer_id == Customer.id)),
        ).where((Order.warehouse_id == self.warehouse_id)
                & (Order.district_id == self.district_id)).order_by(
                    Order.entry_date.desc()).limit(
                        self.orders_to_examine).cte("order_customer_query"))

        # Get order lines with maximum quantity, joined with item table
        OrderLineInner: OrderLine = OrderLine.alias()
        order_line_sum_qty_query = (OrderLineInner.select(
            OrderLineInner.warehouse_id.alias("warehouse_id"),
            OrderLineInner.district_id.alias("district_id"),
            OrderLineInner.order_id.alias("order_id"),
            fn.SUM(OrderLineInner.quantity).alias("sum_qty"),
        ).where((OrderLineInner.warehouse_id == self.warehouse_id)
                & (OrderLineInner.district_id == self.district_id)).group_by(
                    OrderLineInner.warehouse_id,
                    OrderLineInner.district_id,
                    OrderLineInner.order_id,
                    OrderLineInner.item_id,
                ).cte("order_line_sum_qty_query"))
        order_line_max_qty_query = (order_line_sum_qty_query.select(
            order_line_sum_qty_query.c.order_id,
            fn.MAX(order_line_sum_qty_query.c.sum_qty),
        ).group_by(
            order_line_sum_qty_query.c.warehouse_id,
            order_line_sum_qty_query.c.district_id,
            order_line_sum_qty_query.c.order_id,
        ).with_cte(order_line_sum_qty_query))

        customer_name_field = Case(
            None,
            ((
                order_customer_query.c.middle_name.is_null(),
                fn.CONCAT(
                    order_customer_query.c.first_name,
                    " ",
                    order_customer_query.c.last_name,
                ),
            ), ),
            fn.CONCAT(
                order_customer_query.c.first_name,
                order_customer_query.c.middle_name,
                order_customer_query.c.last_name,
            ),
        ).alias("customer_name")

        popular_items_query = (OrderLine.select(
            order_customer_query.c.order_id,
            order_customer_query.c.entry_date,
            customer_name_field,
            fn.SUM(OrderLine.quantity).alias("quantity"),
            Item.id.alias("item_id"),
            Item.name.alias("item_name"),
        ).join(
            order_customer_query,
            on=((OrderLine.warehouse_id == order_customer_query.c.warehouse_id)
                & (OrderLine.district_id == order_customer_query.c.district_id)
                & (OrderLine.order_id == order_customer_query.c.order_id)),
        ).join(Item, on=(OrderLine.item_id == Item.id)).group_by(
            order_customer_query.c.order_id,
            order_customer_query.c.entry_date,
            customer_name_field,
            Item.id,
            Item.name,
        ).having(
            DBTuple(order_customer_query.c.order_id, fn.SUM(
                OrderLine.quantity)).in_(order_line_max_qty_query)).order_by(
                    order_customer_query.c.order_id.desc(),
                    fn.SUM(OrderLine.quantity).desc(),
                ).with_cte(order_customer_query))

        # Process query output
        return ([result for result in popular_items_query.dicts()], )