Example #1
 def zkill(self, killid, mess):
     kill = requests.get(ZKILL.format(killid)).json()
     ship = self.ship_name(kill[0]["victim"]["shipTypeID"])
     victim = kill[0]["victim"]["characterName"]
     victimalliance = kill[0]["victim"]["allianceName"]
     attackers = 0
     for attacker in kill[0]["attackers"]:
         attackers += 1
         if int(attacker["finalBlow"]):
             killer = attacker["characterName"]
             killeralliance = attacker["allianceName"]
     # sometimes zkill doesn't give us a value, which is odd
     try:
         strValue = kill[0]["zkb"]["totalValue"]
         value = round(float(strValue))
         value = humanize.intword(value)
     except KeyError:
         value = "???"
     self.send(mess.getFrom(), "Victim: {}({})".format(victim, victimalliance), message_type=mess.getType())
     self.send(
         mess.getFrom(),
         "Killing Blow: {}({}) ({} other pilot(s) involved)".format(killer, killeralliance, attackers - 1),
         message_type=mess.getType(),
     )
     self.send(mess.getFrom(), "Ship: {} ({})".format(ship, value), message_type=mess.getType())
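Most of the snippets on this page share the pattern Example #1 uses: coerce the raw value to a number, round it, then hand it to humanize.intword. A quick illustration of what that pattern produces (plain humanize, nothing from the example assumed):

    import humanize

    print(humanize.intword(round(float("1234567.89"))))   # '1.2 million'
    print(humanize.intword(round(float("987654321.0"))))  # '987.7 million'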
Example #2
def delete(start, end, marvel=False):
    try:
        start_date = arrow.get(start, "YYYY-MM-DD")
        end_date = arrow.get(end, "YYYY-MM-DD")
        cur_date = start_date

        days = 0
        found = 0
        total_docs = 0
        total_size = 0
        while cur_date.date() != end_date.date():
            index = Index(cur_date.date(), marvel=marvel)

            if index.exists():
                found += 1
                result = index.show_stats()
                total_docs += result['docs_count']
                total_size += result['docs_size']
                
                index.delete()

            days += 1
            cur_date = cur_date.shift(days=+1)  # arrow >= 0.14: replace() no longer accepts plural units; shift() is the supported call

        click.echo("Details regarding Indices from [{}] -> [{}]".format(start_date.format("MMM DD, YYYY"), end_date.format("MMM DD, YYYY")))
        click.echo("Found {} Indices, in {} days".format(found, days))
        click.echo("Total Docs deleted: {}".format(intword(total_docs)))
        click.echo("Total Size freed: {}".format(naturalsize(total_size)))

    except arrow.parser.ParserError:
        log.error("Exception while parsing Start/End dates")
        click.echo("Start/end dates in incorrect format! Please enter dates in ISO format: YYYY-MM-DD")
Example #3
def format_isk_human(value):
    if value is None:
        return ""
    try:
        return "%s ISK" % humanize.intword(value, format='%.2f')
    except Exception:
        return str(value)
Example #4
 def _value(kill):
     try:
         # print kill
         strValue = kill["zkb"]["totalValue"]
         value = round(float(strValue))
         return humanize.intword(value)
     except Exception:
         print(traceback.format_exc())
         return "???"
Example #5
    def kill_list(self, mess, args):
        """Show everyone who is on the watch list."""
        now = datetime.datetime.utcnow()
        members = self["users"].values()
        members = sorted(members, key=lambda member: member['character_name'])
        for member in members:
            member['time_ago'] = ago.human(now - member['time'])

        return {'members': members, 'value': humanize.intword(self['value'])}
Example #6
 def _value(kill):
     try:
         # print kill
         if "zkb" in kill:
             strValue = kill["zkb"]["totalValue"]
             value = round(float(strValue))
             return humanize.intword(value)
         else:
             return None
     except Exception:
         return None
Example #7
 def print_report(self):
     # TODO provide options to make this machine readable.
     megabytes = self.total_downloaded_data / (1024**2)
     average_bandwidth = megabytes / self.total_download_time
     print("{} queries downloaded {} of data at an average of {:.2f} MiB/s".format(
         humanize.intword(self.total_queries),
         humanize.naturalsize(self.total_downloaded_data, binary=True),
         average_bandwidth))
     print("Total mismatches = ", sum(self.mismatch_counts.values()))
     for name in sorted(self.mismatch_counts.keys()):
         print("", name, self.mismatch_counts[name], sep="\t")
Example #8
 def kill_value(self, mess, args):
     """Limit kills to be announced over a given value in isk"""
     if not args:
         self['value'] = 10000000
         return 'Limiting kills announced to 10 million ISK or higher'
     try:
         value = int(args[0])
     except Exception as e:
         return e
     else:
         self['value'] = value
     return "Now limiting kills announced to values of {} ISK or higher".format(humanize.intword(value))
Example #9
 def human_number(self, number):
     if number < 1000:
         return number
     elif number < 10000:
         number = str(number)
         return number[:1] + " " + number[1:]
     elif number < 100000:
         number = str(number)
         return number[:2] + " " + number[2:]
     elif number < 1000000:
         number = str(number)
         return number[:3] + " " + number[3:]
     else:
         return humanize.intword(number)
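For reference, a few outputs of this grouping scheme (shown doctest-style, as if the method were a free function without self):

    >>> human_number(999)
    999
    >>> human_number(1234)
    '1 234'
    >>> human_number(56789)
    '56 789'
    >>> human_number(123456)
    '123 456'
    >>> human_number(2500000)
    '2.5 million'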
Example #10
    def cmd_kill(self, args, msg, no_url=False, raw=None, host=None):
        """Returns a summary of a zKillboard killmail"""
        if not raw:
            if len(args) == 0:
                return '!kill <Kill ID/zKillboard URL>'
            kill_id = args[0]
            try:
                kill_id = int(kill_id)
            except ValueError:
                m = zkillboard_regex.match(kill_id)
                if m:
                    kill_id = m.groupdict()['killID']
                    host = m.groupdict()['host']
                else:
                    return 'Invalid kill ID'

            headers, data = ZKillboard(base_url='https://{}/api/'.format(host)).killID(kill_id).get()
            kill = data[0]
        else:
            kill = raw
            kill_id = raw['killID']

        if no_url:
            url = ''
        else:
            url = ' - https://{}/kill/{}/'.format(host, kill_id)

        # Ignore kills over an hour old if they're from stomp
        age = (datetime.utcnow() - datetime.strptime(kill['killTime'], '%Y-%m-%d %H:%M:%S'))
        if age.total_seconds() > 60 * 60 and raw:
            return

        # Drop kills less than 1mil if they've come from stomp
        if raw and float(kill['zkb']['totalValue']) < 1000000:
            return

        if 'zkb' in kill and 'totalValue' in kill['zkb']:
            value_lost = intword(float(kill['zkb']['totalValue']))
        else:
            value_lost = '???'

        return '{} ({}) in {}, {}, {} attacker(s), {} ISK lost{}'.format(
            kill['victim']['characterName'],
            self.types[str(kill['victim']['shipTypeID'])],  # str(): Python 3 equivalent of the old unicode() call
            self.map.node[int(kill['solarSystemID'])]['name'],
            naturaltime(age),
            len(kill['attackers']),
            value_lost,
            url,
        )
Example #11
def top(name, limit=None, id=None):
    curs = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    if name == "topkills":
        curs.execute("""select killvictim.*, killlist.price from killvictim, killlist where killvictim.killid = killlist.killid and killlist.time > now() - interval '7 days' order by killlist.price desc limit 5""")
    elif name == "toppods":
        curs.execute("""select killvictim.*, killlist.price from killvictim, killlist where killvictim.killid = killlist.killid and killlist.time > now() - interval '7 days' and killvictim.shiptypeid = 670 order by killlist.price desc limit 5""")
    retVal = []
    for kill in curs:
        data = itemMarketInfo(kill['shiptypeid'])
        retVal.append({
            "killid": kill['killid'],
            "pilotname": kill['charactername'],
            "pilotid": kill['characterid'],
            "shipname": data['itemName'],
            "shipid": kill['shiptypeid'],
            "iskloss": humanize.intword(int(kill['price']))
        })
    return retVal
Example #12
def table_html(self, limit=100, columns=None):
    """
    Return a HTML table with the dataframe cols, data types and values
    :param self:
    :param columns: Columns to be printed
    :param limit: how many rows will be printed
    :return:
    """

    columns = parse_columns(self, columns)

    data = self.select(columns).limit(limit).to_json()

    # Load template
    path = os.path.dirname(os.path.abspath(__file__))
    template_loader = jinja2.FileSystemLoader(searchpath=path +
                                              "//../templates")
    template_env = jinja2.Environment(loader=template_loader, autoescape=True)
    template = template_env.get_template("table.html")

    # Keep only the column and data type info that is needed
    dtypes = [(
        i[0],
        i[1],
        j.nullable,
    ) for i, j in zip(self.dtypes, self.schema)]

    total_rows = self.count()
    if total_rows < limit:
        limit = total_rows

    total_rows = humanize.intword(total_rows)
    total_cols = self.cols.count()
    total_partitions = self.partitions()

    # Print table
    output = template.render(cols=dtypes,
                             data=data,
                             limit=limit,
                             total_rows=total_rows,
                             total_cols=total_cols,
                             partitions=total_partitions)
    return output
Example #13
    def collision_probability(self, per_second=1000, probability=0.01):
        """Calculate how many ids have to be generated to have the given
        probability of collision

        """

        import humanize

        outputs = 2**(self.bytelength * 8)

        num = math.sqrt(2.0 * outputs * -math.log1p(-probability))

        seconds_in_year = 3600 * 24 * 365

        years = humanize.intword(num / (per_second * seconds_in_year))

        message = (
            "If you generate {} ids per second, it would take {} years of work to "
            "have a {}% chance of at least one collision").format(
                per_second, years, probability * 100)

        return (message, )
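A minimal standalone sketch of the birthday-bound approximation this method applies, assuming 16-byte (128-bit) ids, 1,000 ids per second and a 1% target probability:

    import math
    import humanize

    outputs = 2 ** (16 * 8)        # size of the id space for 16-byte ids
    probability = 0.01             # desired collision probability
    num = math.sqrt(2.0 * outputs * -math.log1p(-probability))

    per_second = 1000
    seconds_in_year = 3600 * 24 * 365
    print(humanize.intword(num / (per_second * seconds_in_year)))  # approx. '82.9 million' (years)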
Example #14
def top(name, limit=None, id=None):
    curs = g.db.cursor(cursor_factory=psycopg2.extras.DictCursor)
    if name == "topkills":
        curs.execute(
            """select killvictim.*, killlist.price from killvictim, killlist where killvictim.killid = killlist.killid and killlist.time > now() - interval '7 days' order by killlist.price desc limit 5"""
        )
    elif name == "toppods":
        curs.execute(
            """select killvictim.*, killlist.price from killvictim, killlist where killvictim.killid = killlist.killid and killlist.time > now() - interval '7 days' and killvictim.shiptypeid = 670 order by killlist.price desc limit 5"""
        )
    retVal = []
    for kill in curs:
        data = itemMarketInfo(kill['shiptypeid'])
        retVal.append({
            "killid": kill['killid'],
            "pilotname": kill['charactername'],
            "pilotid": kill['characterid'],
            "shipname": data['itemName'],
            "shipid": kill['shiptypeid'],
            "iskloss": humanize.intword(int(kill['price']))
        })
    return retVal
Example #15
def format_currency(amount,
                    currency="GBP",
                    humanize_=True,
                    int_format="{:,.0f}",
                    abbreviate=False):
    abbreviations = {
        "million": "M",
        "billion": "bn",
        "thousand": "k",
    }

    if humanize_:
        amount_str = humanize.intword(amount).split(" ")
        if 1000 < amount < 1000000:
            chopped = amount / float(1000)
            amount_str = ["{:,.1f}".format(chopped), "thousand"]

        if len(amount_str) == 2:
            return (
                babel.numbers.format_currency(
                    float(amount_str[0]),
                    currency,
                    format="¤#,##0.0",
                    currency_digits=False,
                    locale="en_GB",
                ),
                abbreviations.get(amount_str[1], amount_str[1])
                if abbreviate else amount_str[1],
            )

    return (
        babel.numbers.format_currency(amount,
                                      currency,
                                      format="¤#,##0",
                                      currency_digits=False,
                                      locale="en_GB"),
        "",
    )
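Hypothetical usage of format_currency (exact strings depend on the Babel/CLDR data in your environment):

    print(format_currency(2500000, abbreviate=True))  # e.g. ('£2.5', 'M')
    print(format_currency(4200, abbreviate=True))     # e.g. ('£4.2', 'k')
    print(format_currency(950))                       # e.g. ('£950', '')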
Example #16
def process_data(data,
                 fields=[
                     'rank', 'symbol', 'price_usd', 'percent_change_24h',
                     'percent_change_1h', 'market_cap_usd'
                 ],
                 currency='USD',
                 humanize=True):

    if currency.upper() != 'USD':
        # Build a new list instead of mutating `fields` in place: the default
        # argument list is shared across calls, so in-place edits would leak.
        fields = [field.replace('usd', currency.lower()) for field in fields]

    # Initialize structure
    tabulated_data = []
    tabulated_data.append(copy.copy(fields))  # Headers in position 0

    for pos, header in enumerate(tabulated_data[0]):  # Headers in position 0
        good_header = difflib.get_close_matches(header,
                                                fields_good_name.keys())[0]
        tabulated_data[0][pos] = fields_good_name[good_header]
        if good_header in ['price', 'market_cap']:
            tabulated_data[0][pos] = tabulated_data[0][pos].replace(
                'USD', currency.upper())

    for item in data:
        tab_item = []
        for field in fields:
            if humanize and re.search('market_cap*', field):
                tab_item.append(intword(int(float(item[field]))))
            else:
                tab_item.append(item[field])
        tabulated_data.append(copy.copy(tab_item))

    return tabulated_data
Example #17
        def getArtistsInfo(self, id=None):

            artist = self.__client.artist(artist_id=id)

            followers = humanize.intword(artist['followers']['total'])
            followers_comma = humanize.intcomma(artist['followers']['total'])

            if str(followers).isdigit():
                followers = followers_comma

            return {
                'spotify': artist['external_urls']['spotify'],
                'uri': artist['uri'],
                'id': artist['id'],
                'name': artist['name'],
                'image': artist['images'][0]['url'],
                'popularity': artist['popularity'],
                'followers': artist['followers']['total'],
                'followers_display': followers,
                'followers_comma': followers_comma,
                'genres': artist['genres']
            }
Example #18
 def get_stocks_email(stock_list: list) -> dict:
     stock_list = YahooFinanceInterface.__validate_stock_code(stock_list)
     output_dict = {}
     for stock_code in stock_list:
         stock_info = yf.Ticker(stock_code).info
         output_dict[stock_code] = {
             'longName': stock_info.get('longName', 'N/A'),
             'previousClose':
             f"{stock_info.get('currency', 'N/A')} {stock_info.get('previousClose', 'N/A')}",
             'open':
             f"{stock_info.get('currency', 'N/A')} {stock_info.get('open', 'N/A')}",
             'dayRange':
             f"{stock_info.get('currency', 'N/A')} {stock_info.get('dayLow', 'N/A')}-{stock_info.get('dayHigh', 'N/A')}",
             'marketCap':
             f"{stock_info.get('currency', 'N/A')} {humanize.intword(stock_info.get('marketCap', 'N/A'))}",
             'beta': f"{stock_info.get('beta', 'N/A')}",
             'PE(Trailing/Forward)':
             f"{stock_info.get('trailingPE', 'N/A')} / {stock_info.get('forwardPE', 'N/A')}",
             'EPS(Trailing/Forward)':
             f"{stock_info.get('trailingEps', 'N/A')} / {stock_info.get('forwardEps', 'N/A')}",
             'volume': humanize.intword(stock_info.get('volume', 'N/A'))
         }
     return output_dict
Example #19
def process_chunk(output, group):
    name = multiprocessing.current_process().name
    logger = logging.getLogger("ltv/{}".format(name))
    try:
        logger.debug("Processing a chunk to {}".format(output))
        tmp = [str(x[0]) for x in group if x]  # a real list, so str(tmp)[1:-1] yields a quoted, comma-separated id list (a Python 3 map object would not)

        df_var = util.query_vertica_df(
            CLIENT_HIST_QUERY_REVENUE_VAR.format(client_list=str(tmp)[1:-1]))
        logger.debug("Loaded variable data from Vertica: {} rows".format(
            humanize.intword(df_var.size)))

        df_const = util.query_vertica_df(
            CLIENT_HIST_QUERY_REVENUE_CONST.format(client_list=str(tmp)[1:-1]))
        logger.debug("Loaded constant data from Vertica: {} rows".format(
            humanize.intword(df_const.size)))

        df = df_var.append(df_const, ignore_index=True)

        logger.debug("Finished loading data from Vertica: {} rows".format(
            humanize.intword(df.size)))

        # name columns
        df.columns = ["client_id", "activity_date", "Searches", "Revenue"]

        # Squeeze out a little for better memory usage
        df['Searches'] = pd.to_numeric(df['Searches'], downcast='unsigned')
        df['Revenue'] = pd.to_numeric(df['Revenue'], downcast='float')

        #The library functions require the activity date to be in a date format
        df['activity_date'] = pd.to_datetime(df['activity_date'],
                                             format='%Y-%m-%d')

        # This would be a big save, but must be done on input and sql is no good for that
        # Doing it here requires more RAM then we are likely to have, unfortunately.
        #df['client_id'] = df['client_id'].astype('category')

        logger.debug("Input Dataframe size : {}".format(
            humanize.naturalsize(df.memory_usage(deep=True).sum())))

        logger.debug("Computing clv table")

        df_final = generate_clv_table(df)
        df_final.customer_age = df_final.customer_age.astype(int)
        df_final.historical_searches = df_final.historical_searches.astype(int)
        df_final.days_since_last_active = df_final.days_since_last_active.astype(
            int)

        df_final = df_final[[
            'frequency', 'recency', 'customer_age', 'avg_session_value',
            'predicted_searches_14_days', 'alive_probability',
            'predicted_clv_12_months', 'historical_searches', 'historical_clv',
            'total_clv', 'days_since_last_active', 'user_status', 'calc_date'
        ]]

        logger.debug("Output Dataframe size : {}".format(
            humanize.naturalsize(df_final.memory_usage(deep=True).sum())))

        logger.debug("Writing to {}: {} rows".format(
            output, humanize.intword(len(df_final))))

        with write_lock:
            with open(output, 'a') as f:
                df_final.to_csv(f, sep='|', header=False, encoding='utf-8')

        logger.debug("Completed chunk")
    except Exception as e:
        logging.exception(e)
Example #20
    async def updateCard(self, trainer):
        dailyDiff = await self.getDiff(trainer, 1)
        level = trainer.level
        embed = discord.Embed(timestamp=dailyDiff.new_date,
                              colour=int(
                                  trainer.team().colour.replace("#", ""), 16))
        try:
            embed.set_author(name=trainer.username,
                             icon_url=trainer.account().discord().avatar_url)
        except Exception:
            embed.set_author(name=trainer.username)
        embed.add_field(name='Level', value=level.level)
        if level.level != 40:
            embed.add_field(name='XP',
                            value='{:,} / {:,}'.format(
                                trainer.update.xp - level.total_xp,
                                level.xp_required))
        else:
            embed.add_field(name='Total XP',
                            value='{}'.format(humanize.intword(
                                level.total_xp)))
        if dailyDiff.change_xp and dailyDiff.change_time:
            gain = '{:,} since {}. '.format(
                dailyDiff.change_xp, humanize.naturalday(dailyDiff.old_date))
            if dailyDiff.change_time.days > 1:
                gain += "That's {:,} xp/day.".format(
                    round(dailyDiff.change_xp / dailyDiff.change_time.days))
            embed.add_field(name='Gain', value=gain)
            if trainer.goal_daily and dailyDiff.change_time.days > 0:
                dailyGoal = trainer.goal_daily
                embed.add_field(name='Daily completion',
                                value='{}% towards {:,}'.format(
                                    pycent.percentage(
                                        dailyDiff.change_xp /
                                        max(1, dailyDiff.change_time.days),
                                        dailyGoal), dailyGoal))
        if trainer.goal_total and trainer.goal_total != 0:
            totalGoal = trainer.goal_total
        elif level.level < 40:
            totalGoal = trainerdex.Level.from_level(level.level + 1).total_xp
        else:
            totalGoal = None
        if totalGoal:
            totalDiff = await self.getDiff(trainer, 7)
            embed.add_field(name='Goal remaining',
                            value='{:,} out of {}'.format(
                                totalGoal - totalDiff.new_xp,
                                humanize.intword(totalGoal)))
            if totalDiff.change_time.seconds >= 1:
                eta = lambda x, y, z: round(x / (y / z))
                eta = eta(totalGoal - totalDiff.new_xp, totalDiff.change_xp,
                          totalDiff.change_time.total_seconds())
                eta = totalDiff.new_date + datetime.timedelta(seconds=eta)
                embed.add_field(name='Goal ETA',
                                value=humanize.naturaltime(
                                    eta.replace(tzinfo=None)))
            if totalDiff.change_time.total_seconds() < 583200:
                embed.description = "ETA may be inaccurate. Using {} of data.".format(
                    humanize.naturaldelta(totalDiff.change_time))
        embed.set_footer(text="Total XP: {:,}".format(dailyDiff.new_xp))

        return embed
Example #21
 def _humanize_intword(obj):
     return humanize.intword(obj)
Example #22
def _pretty(value):
    result = humanize.intword(value, format="%.2f")
    if len(result) > 50:
        return "Too damn many"
    return result
Example #23
def table_html(self,
               limit=10,
               columns=None,
               title=None,
               full=False,
               truncate=True):
    """
    Return a HTML table with the dataframe cols, data types and values
    :param self:
    :param columns: Columns to be printed
    :param limit: How many rows will be printed
    :param title: Table title
    :param full: Include html header and footer
    :param truncate: Truncate the row information

    :return:
    """

    columns = parse_columns(self, columns)

    if limit is None:
        limit = 10

    if limit == "all":
        data = collect_as_dict(self.cols.select(columns))
    else:
        data = collect_as_dict(self.cols.select(columns).limit(limit))

    # Load the Jinja template
    template_loader = jinja2.FileSystemLoader(
        searchpath=absolute_path("/templates/out"))
    template_env = jinja2.Environment(loader=template_loader, autoescape=True)
    template = template_env.get_template("table.html")

    # Keep only the column and data type info that is needed
    dtypes = []
    for i, j in zip(self.dtypes, self.schema):
        if i[1].startswith("array<struct"):
            dtype = "array<struct>"
        elif i[1].startswith("struct"):
            dtype = "struct"
        else:
            dtype = i[1]

        dtypes.append((i[0], dtype, j.nullable))

    # Remove not selected columns
    final_columns = []
    for i in dtypes:
        for j in columns:
            if i[0] == j:
                final_columns.append(i)

    total_rows = self.rows.approx_count()

    if limit == "all":
        limit = total_rows
    elif total_rows < limit:
        limit = total_rows

    total_rows = humanize.intword(total_rows)

    total_cols = self.cols.count()
    total_partitions = self.partitions()

    output = template.render(cols=final_columns,
                             data=data,
                             limit=limit,
                             total_rows=total_rows,
                             total_cols=total_cols,
                             partitions=total_partitions,
                             title=title,
                             truncate=truncate)

    if full is True:
        output = HEADER + output + FOOTER
    return output
Example #24
def format_number(x):
    """Return readable string for `x`."""
    if 0 < x < 1:
        return '{x:1.2}'.format(x=x)
    return humanize.intword(x)
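Sample outputs, combining the significant-digit branch for fractions with intword for everything else:

    >>> format_number(0.1234)
    '0.12'
    >>> format_number(42)
    '42'
    >>> format_number(3500000)
    '3.5 million'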
Example #25
def get_line_of_code():
    repositoryList = run_query(repositoryListQuery.substitute(username=username, id=id))
    loc = LinesOfCode(id, username, ghtoken, repositoryList)
    yearly_data = loc.calculateLoc()
    total_loc = sum(
        yearly_data[year][quarter][lang]
        for year in yearly_data
        for quarter in yearly_data[year]
        for lang in yearly_data[year][quarter]
    )
    return humanize.intword(int(total_loc))
Example #26
    def show_stats(self):
        url = ES_INDEX_STATS_URL.format(self.name)

        if self._exists is None:
            self.exists()

        if self._exists:
            try:
                resp = requests.get(url)
                if resp.ok and resp.json():
                    self._stats = {
                        'index': self.name,
                        'docs_count': resp.json().get('indices').get(self.name).get('total').get('docs').get('count'),
                        'docs_size': resp.json().get('indices').get(self.name).get('total').get('store').get('size_in_bytes')
                    }
                    log.info("Index Stats: [{}]. Docs: {}, Size: {}".format(self.name, intword(self._stats['docs_count']), 
                                                                      naturalsize(self._stats['docs_size'])))

                else:
                    log.error("Error while fetching stats. Index:{}, Code: {}, Msg: {}".format(self.name, resp.status_code, resp.content))
            except Exception:
                log.exception("Exception while fetching stats. Index: {}".format(self.name))

        return self._stats
Example #27
def prettyNumber2(value, format='%.0f'):
    #if (value >=1000 and value <=10000) or (value >=1000000 and value <=10000000):
    if 1000000 <= value <= 10000000:
        format = '%.1f'
    return humanize.intword(value, format)
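Effect of the band-dependent format (a sketch of expected outputs):

    >>> prettyNumber2(5000000)    # inside the 1M-10M band: one decimal kept
    '5.0 million'
    >>> prettyNumber2(50000000)   # outside the band: no decimals
    '50 million'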
Example #28
def print_graph_info(graph: DiGraph):
    edges_count = humanize.intword(graph.number_of_edges())
    nodes_count = humanize.intword(graph.number_of_nodes())
    print("Graph contains {} edges for {} nodes".format(
        edges_count, nodes_count))
Example #29
    async def corona_command(self, ctx, CountryName=None):
        try:
            if CountryName is None:
                CountryName = "dünya"

            translator = Translator()
            translation = translator.translate(CountryName)
            Country = translation.text.title()
            DataUrl = f"https://coronavirus-19-api.herokuapp.com/countries/{Country}"

            if Country == "World":
                CountryFlag = "https://i.ibb.co/fVJyrgP/world.png"

            else:
                CountryFlag = f"https://www.countries-ofthe-world.com/flags-normal/flag-of-{Country}.png"

            stats = requests.get(DataUrl)
            json_stats = stats.json()
            Cases = json_stats["cases"]
            Recover = json_stats["recovered"]
            Deaths = json_stats["deaths"]
            humanize.i18n.activate("tr_TR")
            totalCases = humanize.intword(json_stats["cases"])
            todayCases = humanize.intword(json_stats["todayCases"])
            totalDeaths = humanize.intword(json_stats["deaths"])
            todayDeaths = humanize.intword(json_stats["todayDeaths"])
            recovered = humanize.intword(json_stats["recovered"])
            active = humanize.intword(json_stats["active"])
            critical = humanize.intword(json_stats["critical"])
            casesPerOneMillion = humanize.intword(json_stats["casesPerOneMillion"])
            totalTests = humanize.intword(json_stats["totalTests"])

            if totalCases == "0":
                totalCases = "Veri girişi yok"

            if todayCases == "0":
                todayCases = "Veri girişi yok"

            if totalDeaths == "0":
                totalDeaths = "Veri girişi yok"

            if todayDeaths == "0":
                todayDeaths = "Veri girişi yok"

            if recovered == "0":
                recovered = "Veri girişi yok"

            if active == "0":
                active = "Veri girişi yok"

            if critical == "0":
                critical = "Veri girişi yok"

            if casesPerOneMillion == "0":
                casesPerOneMillion = "Veri girişi yok"

            if totalTests == "0":
                totalTests = "Veri girişi yok"

            CountryNameTR = translator.translate(CountryName,dest="tr")
            TRCountry = CountryNameTR.text.title()
            coronaStatsEmbed = discord.Embed(title=f"{TRCountry} COVID-19 İstatistikleri", colour=0xffd500, timestamp=ctx.message.created_at)
            coronaStatsEmbed.add_field(name="Bugünkü Vaka", value=todayCases)
            coronaStatsEmbed.add_field(name="Bugünkü Ölüm", value=todayDeaths)
            coronaStatsEmbed.add_field(name="Toplam Vaka", value=totalCases)
            coronaStatsEmbed.add_field(name="Toplam Ölüm", value=totalDeaths)
            coronaStatsEmbed.add_field(name="Toplam Test", value=totalTests)
            coronaStatsEmbed.add_field(name="Toplam İyileşen", value=recovered)
            coronaStatsEmbed.add_field(name="Ağır Hasta", value=critical)
            coronaStatsEmbed.add_field(name="Aktif Vaka", value=active)
            coronaStatsEmbed.add_field(name="Bir Milyon Başına Vaka", value=casesPerOneMillion)

            labels = ['İyileşen', 'Ölen','Aktif']
            quantity = [Recover/Cases, Deaths/Cases, (Cases-Deaths-Recover)/Cases]
            explodeValue = 0.2

            if max(quantity) >= 0.9:
                explodeValue = 0.4

            elif max(quantity) >= 0.8:
                explodeValue = 0.3

            colors = ['green', 'orangered','coral']
            explode = (explodeValue, explodeValue, explodeValue)
            coronaplt.clf()
            coronaplt.figure(figsize=(6, 4), facecolor="lightgray")
            coronaplt.pie(quantity, colors=colors, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
            coronaplt.axis('equal')
            coronaplt.title("VAKALARIN DURUMLARI")
            coronaplt.savefig(f"{ctx.author.id}.png")

            file = discord.File(f"{ctx.author.id}.png", filename=f"{Country}_COVID-19.png")

            coronaStatsEmbed.set_image(url=f"attachment://{Country}_COVID-19.png")
            coronaStatsEmbed.set_thumbnail(url=CountryFlag)
            coronaStatsEmbed.set_footer(text=f"Tarafından: {ctx.author}", icon_url=ctx.author.avatar_url)

            await ctx.send(file=file,embed=coronaStatsEmbed)

            os.remove(f"{ctx.author.id}.png")

            logger.info(f"Requests | COVID-19 | Tarafından: {ctx.author}")
        except Exception as e:
            coronaStatsEmbed_2 = discord.Embed(title="Hata",description ="Bilinmeyen ülke adı ya da veri sunucusu yanıt vermiyor.",colour = 0xd92929)
            await ctx.send(embed=coronaStatsEmbed_2)

            logger.error(f"Requests | COVID-19 | Error: {e}")
Example #30
 def intword_population(self):
     return humanize.intword(self.population)
Example #31
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

__author__ = 'ipetrash'

# SOURCE: https://github.com/jmoiron/humanize

# pip install humanize
import humanize
import datetime as DT

# Localization: how to change the locale at runtime
print(humanize.naturaltime(DT.timedelta(seconds=3)))  # '3 seconds ago'
print(humanize.intword(123455913))  # '123.5 million'
print(humanize.intword(12345591313))  # '12.3 billion'
print(humanize.intword(1339014900000))  # '1.3 trillion'
print(humanize.apnumber(4))  # 'four'
print(humanize.apnumber(7))  # 'seven'
print()

_t = humanize.i18n.activate('ru_RU')

print(humanize.naturaltime(DT.timedelta(seconds=3)))  # '3 секунды назад'
print(humanize.intword(123455913))  # '123.5 миллиона'
print(humanize.intword(12345591313))  # '12.3 миллиарда'
print(humanize.intword(1339014900000))  # '1.3 триллиона'
print(humanize.apnumber(4))  # 'четыре'
print(humanize.apnumber(7))  # 'семь'
print()

humanize.i18n.deactivate()
Example #32
def welcome(ds, **kwargs):
    r = requests.get(
        "https://run.mocky.io/v3/f92cda62-ec3f-4edd-bb79-d59f6fa14dfd")
    print(humanize.intword(12345591313))
    return r.text
Example #33
def format_using_humanize(val, format_type):
    if val is not None:
        if format_type == humanize.intword:
            return humanize.intword(val)
    else:
        return None
Example #34
def human_number(num):
    n = humanize.intword(int(math.ceil(num))).lower()
    m = re.search(r"^(\d+)\.0 ([A-Za-z]+)$", n)
    if m:
        n = m.group(1) + " " + m.group(2)
    return n
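The regex strips a trailing '.0'; for values that land on a whole number of the named unit, humanize can produce that form directly via intword's format parameter, so a sketch of the shortcut:

    import humanize

    print(humanize.intword(2000000, "%.0f"))  # '2 million', no '.0' to strip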
Example #35
def intword(context, value):
    return humanize.intword(value)
Example #36
def intword(x: int) -> str:
    og_x = str(x)
    humanized_x = hstr.intword(x)

    return hstr.intcomma(x) if og_x == humanized_x else humanized_x
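Expected behaviour of this fallback wrapper, assuming hstr is the humanize module imported under that alias:

    >>> intword(999)        # humanized form equals the original: fall back to intcomma
    '999'
    >>> intword(1234567)    # humanized form differs: keep it
    '1.2 million'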
Example #37
def shape(df):
    row, col = df.shape
    return '{} rows, {} cols'.format(intword(row), intword(col))
Example #38
def build_streamer_json(stream, user_info):
    """Compile useful streamer information from a stream object.

    :param stream: The complete stream JSON object
    :param user_info: Mapping with the streamer's display name, Twitch
        username and Extra Life participant ID
    :return: A subset object of relevant streamer info
    """
    participant_id = user_info.get('EXTRALIFE')
    user = user_info.get('TWITCH')
    donate_url = 'https://www.extra-life.org/index.cfm?fuseaction=donorDrive.' \
                 'participant&participantID={}'.format(participant_id)
    s = {
        'dispname': user_info['NAME'],
        'username': user_info['TWITCH'],
        'playing': 'Offline',
        'viewers': 0,
        'url': 'https://www.twitch.tv/{}'.format(user),
        'preview': 'http://placehold.it/640x360',
        'participant_id': participant_id,
        'donate': donate_url if participant_id else None,
        'fps': 0,
        'views': 0,
    }

    if not stream['stream']:
        return s

    mapping = [(
        'pubg',
        'PUBG',
        "PLAYERUNKNOWN'S BATTLEGROUNDS",
    ), (
        'overwatch',
        'BLIZZARD',
        'Overwatch',
    ), (
        'rocketleague',
        'STEAM',
        'Rocket League',
    ), (
        'destiny2',
        'DESTINY2',
        'Destiny 2',
    )]

    for key, lookup, twitch_name in mapping:
        module = importlib.import_module('games.{}'.format(key))
        if user_info.get(lookup):
            if stream['stream'].get('game') != twitch_name:
                continue
            try:
                s[key] = module.stats(user_info[lookup])
            except KeyError:
                s[key] = {}

    s['username'] = stream['stream']['channel']['display_name']
    s['playing'] = stream['stream']['game']
    s['viewers'] = humanize.intcomma(int(stream['stream']['viewers']))
    s['preview'] = stream['stream']['preview']['large']
    s['fps'] = stream['stream']['average_fps']
    s['views'] = humanize.intword(int(stream['stream']['channel']['views']))

    return s
Example #39
    def columns(df, columns, buckets=40, infer=False, relative_error=1):
        """
        Return statistical information about a specific column in json format
        :param df: Dataframe to be processed
        :param columns: Columns that you want to profile
        :param buckets: Create buckets divided by range. Each bin is equal.
        :param infer: Whether to infer the column data types from a sample
        :param relative_error: Relative error for the percentile calculation. 0 is slower but exact; larger values are faster but less accurate
        :return: json object with the profiling information per column
        """

        columns = parse_columns(df, columns)

        # Get just a sample to infer the column data type
        # sample_size_number = sample_size(rows_count, 95.0, 2.0)
        # fraction = sample_size_number / rows_count
        # sample = df.sample(False, fraction, seed=1)

        # Initialize Objects
        columns_info = {}
        columns_info['columns'] = {}

        rows_count = df.count()
        columns_info['rows_count'] = humanize.intword(rows_count)
        count_dtypes = Profiler.count_data_types(df, columns, infer)

        columns_info["count_types"] = count_dtypes["count_types"]

        columns_info['size'] = humanize.naturalsize(df.size())

        # Cast columns to the data type infer by count_data_types()
        df = Profiler.cast_columns(df, columns, count_dtypes).cache()

        # Calculate stats
        stats = Profiler.general_stats(df, columns)

        for col_name in columns:
            col_info = {}
            logger.print("------------------------------")
            logger.print("Processing column '" + col_name + "'...")
            columns_info['columns'][col_name] = {}

            col_info["stats"] = stats[col_name]
            col_info.update(Profiler.frequency(df, col_name, buckets))
            col_info.update(
                Profiler.stats_by_column(col_name, stats, count_dtypes,
                                         rows_count))

            col_info['column_dtype'] = count_dtypes["columns"][col_name][
                'dtype']
            col_info["dtypes_stats"] = count_dtypes["columns"][col_name][
                'details']

            column_type = count_dtypes["columns"][col_name]['type']

            if column_type == "numeric":
                col_info["stats"].update(
                    Profiler.extra_numeric_stats(df, col_name, stats,
                                                 relative_error))
                col_info["hist"] = df.cols.hist(col_name,
                                                stats[col_name]["min"],
                                                stats[col_name]["max"],
                                                buckets)

            if column_type == "categorical" or column_type == "array":
                col_info["hist"] = Profiler.hist_string(df, col_name, buckets)

            if column_type == "date":
                col_info["hist"] = Profiler.hist_date(df, col_name)

            columns_info['columns'][col_name] = col_info

        return columns_info
Example #40
 def __call__(self, x, pos=None):
     return humanize.intword(x, "%.3f")
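The __call__(self, x, pos=None) signature matches matplotlib's tick-formatter protocol. A hedged sketch of the equivalent wiring with a plain FuncFormatter (the surrounding class is assumed to subclass matplotlib.ticker.Formatter):

    import humanize
    import matplotlib.pyplot as plt
    from matplotlib.ticker import FuncFormatter

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 5e6, 1e7])
    ax.yaxis.set_major_formatter(
        FuncFormatter(lambda x, pos: humanize.intword(x, "%.3f")))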
Example #41
def retrieve_miles_string(speed, total_days, description, class_="distance"):
    string = "You have traveled <span class='{2}'>{0}</span> miles {1}.\n".format(humanize.intword(total_days * speed * .62), description, class_)
    return string
Example #42
 def _humanize_intword(obj):
     return humanize.intword(obj)
Example #43
 def format_data(self, value):
     return humanize.intword(value, "%.3f")
Example #44
# get detailed info for the movie, incl. revenue

movie = response.json()

# print(movie)

revenue = movie['revenue']


import humanize

# Format revenue the way we expect money to be printed (in millions, not a long run of raw digits)


revenue_formatted = "$" + humanize.intword(revenue)
# revenue_formatted

# print(id)

# movie['original_title'] + " has earned " + revenue_formatted + " in revenue."
# 'Home Alone 3 has earned $79.1 million in revenue.'
twitter.update_status(status=movie['original_title'] + " has earned " + revenue_formatted + " in revenue.", in_reply_to_status_id_str=id)

## expect in_reply_to_status_id_str to not be blank!!!!!

# {'contributors': None,
#  'coordinates': None,
#  'created_at': 'Thu Mar 29 01:30:02 +0000 2018',
#  'entities': {'hashtags': [], 'symbols': [], 'urls': [], 'user_mentions': []},
#  'favorite_count': 0,
Example #45
def humanize_number(number):
    """Convert a number to a more human readable representation."""
    if len(str(number)) > 6:
        return humanize.intword(number)
    return humanize.intcomma(number)
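Sample outputs around the 6-digit threshold:

    >>> humanize_number(123456)    # 6 digits or fewer: comma-grouped
    '123,456'
    >>> humanize_number(1234567)   # 7 digits or more: word form
    '1.2 million'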
Example #46
def mem_usage(df):
    return intword(df.memory_usage(deep=True).sum())
Example #47
 def garbage_collector(self, blobs_hashes):
     count_removed_cache, reclaimed_cache_space = remove_unnecessary_files(
         blobs_hashes, self._path)
     log.debug(output_messages['INFO_REMOVED_FILES'] %
               (humanize.intword(count_removed_cache), self._path))
     return count_removed_cache, reclaimed_cache_space
Example #48
def criteria_defensive_investor(ticker_or_list,
                                relax_current_ratio=False,
                                verbose=False,
                                dt=datetime.today(),
                                force_download=False,
                                show_progress=True,
                                return_list_five=False):
    return_list = []  # if return_list_five is True, return only the list of companies passing the first 5 criteria;
    # otherwise, return a full dataframe with data from all companies passed in ticker_or_list

    # The function accepts either a ticker or a list of tickers. This block handles both cases
    list_tickers = []
    if isinstance(ticker_or_list, str):
        list_tickers.append(ticker_or_list)
    else:
        list_tickers = ticker_or_list

    # Creates the empty dataframe, to be returned in case of return_list_five == False
    summary_columns = [
        'ticker', 'last_date', 'first_date', 'revenueusd', 'current_ratio',
        'positive_eps_p10yrs_count', 'dividend_distribution_p20yrs_count',
        'earnings_change_p10yrs', 'pe', 'pb', 'pexpb', 'size_criteria',
        'financial_condition_criteria', 'earnings_stability_criteria',
        'dividend_record_criteria', 'earnings_growth_criteria', 'pe_criteria',
        'pb_criteria', 'first_five_criteria', 'full_criteria'
    ]
    df_ = pd.DataFrame(columns=summary_columns)

    # If show_progress is True, the iterator shows a progress bar; otherwise, nothing. This block handles that
    def empty_func(x):
        return x

    if show_progress:
        func = tqdm
    else:
        func = empty_func

    # Main iterator
    for ticker in func(list_tickers):
        if verbose:
            print('\nTest for ' + ticker + ':')
        data = get_data(ticker, dt=dt, force_download=force_download)

        # In case of no data
        if data.shape[0] == 0:  # No data
            if verbose:
                print('- No data available')
            df_ = df_.append(pd.Series([float('NaN')] * len(summary_columns),
                                       index=summary_columns),
                             ignore_index=True)
            df_.at[df_.index[-1], 'ticker'] = ticker
            continue

        # Size criteria
        size_criteria = data['revenueusd'].values[-1] > 2000000000  #100000000
        if return_list_five and not size_criteria:  # Skip to the next ticker if we only need the first-5-criteria list and this one failed
            continue

        # Financial condition criteria
        # TODO: parameter relax_current_ratio should be replaced by a better way to treat current ratio of financial
        # companies
        if (data['currentratio'].values[-1] is not None) and (not np.isnan(
                data['currentratio'].values[-1])):
            current_ratio = data['currentratio'].values[-1]
            financial_condition_criteria = current_ratio > 2
        else:
            current_ratio = float('NaN')
            financial_condition_criteria = relax_current_ratio

        if return_list_five and not financial_condition_criteria:  # Skip to the next ticker if we only need the first-5-criteria list and this one failed
            continue

        # Earnings stability criteria
        earnings_stability_criteria = (data['eps'].tail(10) > 0).all()
        if return_list_five and not earnings_stability_criteria:  # Skip to the next ticker if we only need the first-5-criteria list and this one failed
            continue

        # Dividends record criteria
        dividend_record_criteria = (data['dps'].tail(20) > 0).all()
        if return_list_five and not dividend_record_criteria:  # Skip to the next ticker if we only need the first-5-criteria list and this one failed
            continue

        # Earnings growth criteria
        last_year = pd.to_datetime(data['calendardate'].values[-1]).year
        eps_0 = data[(data['calendardate'].dt.year > last_year - 13) & (
            data['calendardate'].dt.year <= last_year - 10)]['eps'].mean()
        eps_1 = data[(data['calendardate'].dt.year > last_year - 3) & (
            data['calendardate'].dt.year <= last_year)]['eps'].mean()
        earnings_growth_criteria = (np.float64(eps_1) / eps_0) > 1.33
        if return_list_five and not earnings_growth_criteria:  # Skip to the next ticker if we only need the first-5-criteria list and this one failed
            continue

        # Only first five criteria
        first_five_criteria = size_criteria and financial_condition_criteria and earnings_stability_criteria \
                              and dividend_record_criteria and earnings_growth_criteria

        # Skip to the next ticker if we only need the first-5-criteria list and this one failed
        if return_list_five and not first_five_criteria:
            continue
        # If it passed, record the ticker and move on
        elif return_list_five and first_five_criteria:
            return_list.append(ticker)
            continue

        # P/E ratio criteria
        current_price = data['price'].values[-1]
        pe = current_price / eps_1
        pe_criteria = pe < 15

        # Price to Assets criteria
        pb = current_price / data['bvps'].values[-1]
        pb_criteria = pb < 1.5
        if pe * pb < 22.5:
            pb_criteria = True

        # Full criteria
        full_criteria = size_criteria and financial_condition_criteria and earnings_stability_criteria \
                        and dividend_record_criteria and earnings_growth_criteria and pe_criteria and pb_criteria

        # Add to dataframe
        my_dic = {
            'ticker': ticker,
            'last_date': data['calendardate'].values[-1],
            'first_date': data['calendardate'].values[0],
            'revenueusd': data['revenueusd'].values[-1],
            'current_ratio': current_ratio,
            'positive_eps_p10yrs_count': data.tail(10)[data['eps'] > 0]['eps'].count(),
            'dividend_distribution_p20yrs_count': data.tail(20)[data['dps'] > 0]['dps'].count(),
            'earnings_change_p10yrs': (np.float64(eps_1) / eps_0),
            'pe': pe,
            'pb': pb,
            'pexpb': pe * pb,
            'size_criteria': size_criteria,
            'financial_condition_criteria': financial_condition_criteria,
            'earnings_stability_criteria': earnings_stability_criteria,
            'dividend_record_criteria': dividend_record_criteria,
            'earnings_growth_criteria': earnings_growth_criteria,
            'pe_criteria': pe_criteria,
            'pb_criteria': pb_criteria,
            'first_five_criteria': first_five_criteria,
            'full_criteria': full_criteria
        }
        df_.loc[len(df_)] = my_dic

        if verbose:
            print('- Size criteria: \t\t' + str(size_criteria) +
                  '\tRevenues of $' +
                  humanize.intword(data['revenueusd'].values[-1]) +
                  ' (threshold is $2 billion)')
            print('- Financial condition criteria: ' +
                  str(financial_condition_criteria) +
                  '\tCurrent ratio of %1.2f' % current_ratio +
                  ' (threshold is 2.0)')
            print('- Earnings stability criteria: \t' +
                  str(earnings_stability_criteria) +
                  '\tPositive earnings in %d of past 10 years' %
                  data.tail(10)[data['eps'] > 0]['eps'].count())
            print('- Dividend record criteria: \t' +
                  str(dividend_record_criteria) +
                  '\tDistribution of dividend in %d of past 20 years' %
                  data.tail(20)[data['dps'] > 0]['dps'].count())
            print('- Earnings growth criteria: \t' +
                  str(earnings_growth_criteria) +
                  '\tEarnings change of %+.0f%%' %
                  (100 * ((np.float64(eps_1) / eps_0) - 1)) +
                  ' in past 10 years (minimum is +33%)')
            print(
                '- Moderate P/E ratio criteria: \t' + str(pe_criteria) +
                '\tCurrent price is %1.1fx avg P3yrs earnings (limit is 15)' %
                pe)
            print(
                '- Moderate P/B ratio criteria: \t' + str(pb_criteria) +
                '\tCurrent price is %1.1fx last book value (limit 1.5), \n\t\t\t\t\tand PE * PB is %1.1f (limit 22.5)'
                % (pb, pe * pb))
            print('- Full criteria: \t\t' + str(full_criteria))

    if return_list_five:
        return return_list
    else:
        return df_
Example #49
def process():
    table_name = "ExecucaoFinanceira"

    files = os.listdir('banco/data')
    data_file_path = None
    for f in files:
        fixed_table_name = extrai_table_name(f)[1]
        if fixed_table_name.startswith(table_name):
            data_file_path = os.path.join('banco/data', f)

    if not data_file_path:
        print('Nome errado de tabela')

    data_file = os.path.basename(data_file_path)
    meta_name = data_file.replace(".csv", "")
    table_name = extrai_table_name(meta_name)[1]
    table_timestamp = extrai_table_name(meta_name)[0]
    text_file = open(data_file_path, "r", encoding="utf-8")
    new_file = open("temp.csv", "wt")

    while True:
        data = text_file.read(4096)
        if not data:
            break
        data = data.replace("\r\n", "")
        new_file.write(data)

    new_file.flush()
    new_file.close()

    text_file = open("temp.csv", "r", encoding="utf-8")
    original_lines = 0
    for line in text_file.readlines():
        original_lines += 1

    new_file = open("temp.csv", "rt")
    reader = csv.reader(new_file, delimiter=';', quotechar='"', skipinitialspace=True)

    print("Dados de {0}".format(table_name))
    table = grafo[table_name]

    valid_lines = 0
    for line in reader:
        if len(line) == 1:
            continue
        line = fix_line(line, len(table._columns))
        try:
            table.add_data(line)
            valid_lines += 1
        except Exception:
            print("Erro na linha {}".format(valid_lines))

# 4397 - 3606 = 791
# 3634 - 3606 = 28
# 791 / 4397

    missing_lines = original_lines - valid_lines
    error = float(missing_lines/original_lines)
    error_str = "{:.2%}".format(error)
    print("Total: {}".format(original_lines))
    print("Parsed: {}".format(valid_lines))
    print("Error Rate: {:.2%}".format(error))

    line_count = 0
    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato", "IdInstituicaoContratado")._from(table)
    for data in query:
        #print(data)
        line_count += 1
    print("Total de registros analisados : {} ({} perdidos)".format(line_count, missing_lines))

    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total_com_licitacao = sum(map(lambda x: x[2], filter(lambda x: x[1], query)))
    print("Execução Financeira com referência para Licitação: {}".format(
          total_com_licitacao))

    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total_sem_licitacao = sum(map(lambda x: x[2], filter(lambda x: not x[1], query)))
    print("Execução Financeira sem referência para Licitação: {}".format(
          total_sem_licitacao))

    query = select("IdExecucaoFinanceira", "IdLicitacao", "ValContrato")._from(table)
    total = sum(map(lambda x: x[2], query))
    print("Total dos gastos: {}".format(total))

    date_str = datetime.datetime.now().strftime("%d/%m/%Y")

    data = {
        'd_total': str(total),
        'd_total_sem_ref_lic': str(total_sem_licitacao),
        'd_total_com_ref_lic': str(total_com_licitacao),
        'total': humanize.intword(total).replace("billion", "bilhões"),
        'total_sem_ref_lic': humanize.intword(total_sem_licitacao).replace("billion", "bilhões"),
        'total_com_ref_lic': humanize.intword(total_com_licitacao).replace("billion", "bilhões"),
        'percentual_dados_desconsiderados': error_str,
        'atualizado': date_str
    }
    out = open("data.json", "wt")
    out.write(json.dumps(data))
    out.close()

    out = open("hist/data_{}.json".format(datetime.datetime.now().strftime("%Y%m%d")), "wt")
    out.write(json.dumps(data))
    out.close()
    print(data)
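A side note on the .replace("billion", "bilhões") patching above: humanize ships its own translations, so the same effect can in principle be obtained by activating a locale instead (a sketch; the exact wording depends on the humanize version and its bundled pt_BR catalogue):

    import humanize

    humanize.i18n.activate("pt_BR")
    print(humanize.intword(3500000000))  # e.g. '3.5 bilhão', version-dependent
    humanize.i18n.deactivate()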