Example No. 1
def index_handler(page="index"):
    page = page.lower()
    data = utils.jsonapi(params, methods)
    #try:
    # Get HDD data for chart
    data['hdduse'] = utils.percentage(data['system.getDiskFreeSpace'], data['system.getDiskSize'])
    data['hddfree'] = 100-data['hdduse']
    # Get RAM data for chart
    data['usedram'] = utils.percentage(data['system.getJavaMemoryUsage'], data['system.getJavaMemoryTotal'])
    data['unusedram'] = 100-data['usedram']
    # Get player count data for chart
    data['online'] = utils.percentage(data['getPlayerCount'], data['getPlayerLimit'])
    data['offline'] = 100-data['online']

    # Try to load skins.db
    if not os.path.isfile('skins.db'):
        # Never made the DB before...
        skindb = {}
    else:
        with open('skins.db','r') as f:
            skindb = json.loads(f.read())
    full_players = list(set(data['getPlayerNames'] + data['getOfflinePlayerNames']))
    for player in data['getPlayerNames']:
        if player not in skindb:
            # Never generated before...
            utils.gen_skin(player,16)
            skindb[player] = time.time()
            print 'Generating skin for %s' % player
        else:
            # Assume their name is in the DB.. check times!
            diff = int(time.time()) - int(skindb[player])
            if diff > 43200:
                utils.gen_skin(player,16)
                skindb[player] = time.time()
    with open('skins.db', 'w') as f:
        f.write(json.dumps(skindb,indent=4))
    # Pull news posts from the forum RSS feed. Crude, but it works.
    posts, uri = [], 'http://forum.wonderfulplanet.net/index.php?forums/news/index.rss'
    forum = xmltodict.parse(urllib2.urlopen(uri).read())['rss']['channel']
    for item in forum['item']:
        content = h().unescape(item['content:encoded'])
        if '...<br />' in content:
            content = content.split('...<br />',1)[0]
            content += ' [<a href="%s">Read more</a>]' % h().unescape(item['link'])
        posts.append({
                      'title': h().unescape(item['title']),
                      'author': h().unescape(item['author']),
                      'date': h().unescape(item['pubDate']),
                      'link': h().unescape(item['link']),
                      'content': content
                 })
    #except:
    #    return flask.abort(404)

    page += '.html'
    if os.path.isfile('templates/' + page):
        return flask.render_template(page, data=data, posts=posts)
    return flask.abort(404)
Example No. 2
 def summarize_run(self):
     '''Summarize the main events of this crawling run.'''
     elapsed = time.time() - self.start_time
     elapsed_str = str(datetime.timedelta(seconds=elapsed))
     indent = ' ' * 4
     print('\n\nTiming')
     print(indent + 'Time elapsed: {}.'.format(elapsed_str))
     print(indent + 'Time spent on HTTP requests: {0} or {1:.2f}%.'.
             format(str(datetime.timedelta(seconds=self.request_time)),
                    100 * self.request_time/elapsed))
     print(indent + 'Time spent saving to disk: {0} or {1:.2f}%.'.
             format(str(datetime.timedelta(seconds=self.disk_save_time)),
                    100 * self.disk_save_time/elapsed))
     print('Links and pages')
     print(indent + '{0}/{1} pages = ({2:d}%) stored to disk.'.
             format(self.count_saved, self.count_prospective_pages, 
                    utils.percentage(self.count_saved,
                                     self.count_prospective_pages)))
     print('Errors')
     print(indent + '{} pages discarded.'.format(self.count_discarded_pages))
     print(indent + '{} URLErrors.'.format(self.urlerrors), end='')
     if self.urlerrors:
         print(indent + '''Consider running again until there are no '''
                 '''more URLErrors''')
     else:
         print('\n')
     with sqlite3.connect('crawl_' + url_core +
             '.db') as connection:
         cursor = connection.cursor()
         cursor = cursor.execute('''SELECT * FROM urls;''')
         print('{} unique records in database'.
                 format(len(cursor.fetchall())))
         cursor.close()
Example No. 3
 def summarize_run(self):
     '''Summarize the main events of this crawling run.'''
     elapsed = time.time() - self.start_time
     elapsed_str = str(datetime.timedelta(seconds=elapsed))
     indent = ' ' * 4
     print('\n\nTiming')
     print(indent + 'Time elapsed: {}.'.format(elapsed_str))
     print('Links and pages')
     print(indent + ('{} links added this run.'
                     .format(self.total_links_added)))
     print(indent + ('{} non-unique links ignored.'
                     .format(self.count_discarded_urls)))
     count_prospective_pages = (self.count_crawled_pages
                                + self.count_no_links_found_pages)
     percentage = utils.percentage(self.count_crawled_pages,
                                   count_prospective_pages)
     print(indent + ('{0}/{1} = ({2:d}%) pages successfully scraped for '
                     'links.'.format(self.count_crawled_pages,
                                     count_prospective_pages, percentage)))
     print('Errors')
     print(indent + ('{} pages discarded (no unique or usable links found).'
                     .format(self.count_no_links_found_pages)))
     with sqlite3.connect('crawl_' + url_core + '.db') as connection:
         cursor = connection.cursor()
         cursor = cursor.execute('''SELECT * FROM urls;''')
         print('{} unique records in database'
               .format(len(cursor.fetchall())))
         cursor.close()
Example No. 4
def make_basic_attributes_webpage(alist):
    def _numerator(time_signature):
        return time_signature.split("/")[0]
    
    def print_attribute(attribute, collection, out):
        counter = Counter([getattr(song, attribute) for song in collection])
        out.write(rst_table(utils.percentage(counter)))
        out.write("\n\n")
        
    with codecs.open("../docs/basic-attributes.rst", 'w', encoding="utf-8") as out:
        out.write(rst_header(u"Basic Attributes", 1))

        for name, collection in alist:
            out.write(rst_header(name, 2))

            out.write("Number of songs: {0}\n\n".format(len(collection)))

            out.write(rst_header("Time Signature", 3))
            counter = Counter([_numerator(song.time_signature) for song in collection])
            out.write(rst_table(utils.percentage(counter)))
            out.write("\n\n")

            out.write(rst_header("Mode", 3))
            print_attribute("mode", collection, out)

            out.write(rst_header("Ambitus in semitones", 3))
            print_attribute("ambitus", collection, out)

            out.write(rst_header("Pickup", 3))
            out.write("1 if has pickup\n\n")
            print_attribute("has_pickup", collection, out)
Example No. 5
 def run_conversion(self, src_file_path):
     '''Starts a conversion subprocess for a given source'''
     dst_file_path = splitext(src_file_path)[0] + '.converting.mp4'
     final_dst_file_path = splitext(src_file_path)[0] + '.mp4'
     log_file_path = splitext(src_file_path)[0] + '.conversion.log'
     error_file_path = splitext(src_file_path)[0] + '.conversion.error'
     try:
         self.conversion = Conversion(src_file_path, dst_file_path,
                                      log_file_path)
         self.conversion.start()
         converting = True
     except (StopIteration, MediaInfoError):
         print("Error, failed to start conversion of {}".format(
             src_file_path))
         converting = False
     while converting:
         try:
             elapsed = str(self.conversion.elapsed())
             eta = str(self.conversion.eta())
             output_size = human_readable_size(
                 self.conversion.output_size())
             progress = percentage(self.conversion.progress())
             if output_size is not None and progress is not None:
                 output_str = "Converting [{}]: {} Progress {} ETA: {}\r".format(
                     elapsed, output_size, progress, eta)
                 sys.stdout.write(output_str)
             sleep(0.5)
             sys.stdout.flush()
         except psutil.NoSuchProcess:
             print()
             print("Conversion process ended...")
             break
     result = {
         'error': 'Conversion could not be started'
     } if not self.conversion else self.conversion.result()
     if 'error' in result:
         print("There was an error during conversion: {}".format(result))
         increment_error_counter(error_file_path)
         log_failed_conversion(log_file_path)
     elif getsize(dst_file_path) < 10000:
         print("There was an error during conversion: {} is too small...".
               format(dst_file_path))
         increment_error_counter(error_file_path)
         log_failed_conversion(log_file_path)
     elif not MediaInfo(dst_file_path).valid():
         print(
             "There was an error during conversion: {} media info is invalid"
             .format(dst_file_path))
         increment_error_counter(error_file_path)
         log_failed_conversion(log_file_path)
     else:
         remove(src_file_path)
         rename(dst_file_path, final_dst_file_path)
         log_successful_conversion(log_file_path)
Example No. 6
 def grade(self):
     """Attempts to grade the exam"""
     meta = self.exam_meta
     max_score = self.exam.max_score
     if self.should_grade:
         self.score = self.exam.get_score(meta)
         percent = percentage(self.score, max_score)
         self.score_str = f"{self.score} / {max_score} ({percent:.1f}%)"
     else:
         self.score = NO_SCORE
         self.score_str = f"? / {max_score}"
Example No. 7
	def classify_reviews(self, folderpath):
		negc = 0
		posc = 0
		undc = 0
		total = 0
		filenames = glob.glob(folderpath)
		totalnumfiles = len(filenames)

		print("Testing data set")
		print("Number of files:{0}".format(totalnumfiles))

		if totalnumfiles == 0:
			print("No files to classify, exiting...")
			return

		for f in filenames:
			with open(f, 'r') as infile:
				clsfy = self.classify(infile)
				total += clsfy
				if clsfy > 0:
					posc += 1
				elif clsfy < 0:
					negc += 1
				elif clsfy == 0:
					undc += 1

		results = """
Results...
Average classification: {0}
Percentage of positive classifications: %{1}
Percentage of negative classifications: %{2}
Percentage of undetermined classifications: %{3}
		""".format(total/totalnumfiles,utils.percentage(posc, len(filenames)),utils.percentage(negc, len(filenames)),utils.percentage(undc, len(filenames)))
		print(results)

		utils.print_to_file("testResults.txt", "\nFolderpath: {0}\nNumber of Files: {1}\n{2}".format(folderpath,totalnumfiles,results))
Example No. 8
def display_question_stats(meta, question_stats):
    # format as:
    # question (question id)
    #   [0p]: 1 (10%)
    #   [1p]: 9 (90%)
    TEXT_LENGTH = 32
    click.echo("Per-question score stats:")
    for qid, scores in question_stats.items():
        question = meta.find_question(qid)
        click.echo(f"{short_str(question.text, TEXT_LENGTH)} ({qid})")
        total = sum(count for score, count in scores.items())
        for score, count in scores.items():
            percent = percentage(count, total)
            click.echo(f"\t[{score}p]: {count} ({percent:.1f}%)")
        click.echo()
    click.echo()
Example No. 9
def process_errors(error_logs):
    '''Take a list of error logs and return its characteristics, i.e. the
    percentage of errors related to merchant data, database failures, etc.'''
    total = len(error_logs)
    if total == 0:
        # No errors; still return a (category, 0) entry for every category,
        # for consistency with the normal return shape
        errors = [(category, 0) for category in ERROR_CATEGORIES]
        return errors

    def count_category(cat):
        """Return the number of logs that have category 'cat'."""
        return sum([(1 if log.category == cat else 0) for log in error_logs])

    errors = [(category, percentage(count_category(category), total))
              for category in ERROR_CATEGORIES]
    return errors
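
A hypothetical call, only to illustrate the return shape; Log and ERROR_CATEGORIES below are illustrative stand-ins, and the exact numbers depend on how percentage rounds:

from collections import namedtuple

Log = namedtuple("Log", "category")                      # stand-in for the real log objects
ERROR_CATEGORIES = ["merchant", "database", "network"]   # illustrative categories only

logs = [Log("merchant"), Log("merchant"), Log("database"), Log("network")]
# process_errors(logs) would yield something like
# [("merchant", 50), ("database", 25), ("network", 25)],
# assuming percentage(count, total) returns whole-number percentages.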
Example No. 10
def grade_exam(meta, path, question_callback) -> (int, int, float, str):
    """
    Utility function that grades a single exam
    Returns
        A tuple of (score, max_score, percentage, user_name)
    """
    try:
        exam = load_exam(path)
        if not exam.was_user_completed:
            raise Exception('Not completed')
        meta.ensure_exam_compatibility(exam)
        score = exam.get_score(meta, question_callback)
        max_score = exam.max_score
        percent = percentage(score, max_score)
        return score, max_score, percent, exam.user_name
    except Exception as ex:
        click.echo(f"{os.path.basename(path)}: {ex}")
    return None
Example No. 11
def linear_grader(question_meta, question) -> int:
    """
    Question grader that grants points proportionally
    to the number of correct answers.
    The score is set to zero if a wrong answer was selected
    """
    if question_meta.id != question.id:
        return 0
    correct_answers = 0
    all_correct_answers = 0
    for answer in question.answers:
        meta = question_meta.find_answer(answer.id)
        if not meta or (answer.is_selected and not meta.is_correct):
            return 0
        if meta.is_correct:
            all_correct_answers += 1
            if answer.is_selected:
                correct_answers += 1

    ratio = percentage(correct_answers, all_correct_answers) / 100.0

    return math.floor(question.points * ratio)
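
As a worked example: on a 4-point question with three correct answers, a student who selects two of them and no wrong answers gets math.floor(4 * (2 / 3)) = 2 points, assuming percentage(2, 3) evaluates to roughly 66.67 so that ratio comes out to about 0.667.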
Example No. 12
 def print_attribute(attribute, collection, out):
     counter = Counter([getattr(song, attribute) for song in collection])
     out.write(rst_table(utils.percentage(counter)))
     out.write("\n\n")
Example No. 13
async def make_player_card(player_doc):
    """Generate and return a team card (as a discord.py-compatible image).
    
    - `player_doc` is a player document."""
    def draw_std(x, y, text, font="m"):
        # draw centered white text at (x, y) using the given font key
        draw.text((x, y),
                  str(text), (255, 255, 255),
                  font=fonts[font],
                  align='center',
                  anchor="mm")

    stat = player_doc['cached']

    player_card_base_img_fp = "src/static/playercard.png"
    img = Image.open(player_card_base_img_fp, mode='r')

    draw = ImageDraw.Draw(img)

    #header
    draw_std(640, 65, player_doc["user_name"], "l")  #player
    draw_std(640, 105, player_doc["team_name"])  #team name

    #average accuracy
    draw_std(185, 218, percentage(stat["average_acc"]))
    draw_std(185, 245, "#" + str(stat["acc_rank"]), "s")

    #average score
    draw_std(640, 218, comma_sep(stat["average_score"]))
    draw_std(640, 245, "#" + str(stat["score_rank"]), "s")

    #average contrib
    draw_std(1106, 218, percentage(stat["average_contrib"]))
    draw_std(1106, 245, "#" + str(stat["contrib_rank"]), "s")

    #stat row
    draw_std(104, 335, stat['maps_played'])  #playcount
    if stat['maps_played'] != 0:
        wr_str = str(stat["maps_won"]) + "/" + str(
            stat['maps_lost']) + " (" + percentage(
                stat["maps_won"] / stat["maps_played"]) + ")"
    else:
        wr_str = "-"
    draw_std(311, 335, wr_str)  #w/r(wr%)
    draw_std(742, 335, comma_sep(stat["hits"]["300_count"]))  #300s
    draw_std(886, 335, comma_sep(stat["hits"]["100_count"]))  #100s
    draw_std(1028, 335, comma_sep(stat["hits"]["50_count"]))  #50s
    draw_std(1173, 335, comma_sep(stat["hits"]["miss_count"]))  #misses

    #table
    #x-dists: 180,345,548,702,840
    #y-dist: 39 each row starting from 526
    mods = ["NM", "HD", "HR", "DT", "FM"]
    for i in range(0, 5):
        row_pos = 526 + 39 * i
        mod_stat = stat["by_mod"][mods[i]]
        draw_std(180, row_pos, mod_stat["maps_played"])  #played
        if mod_stat["maps_played"] != 0:
            mod_wr_str = str(mod_stat["maps_won"]) + "/" + str(
                mod_stat['maps_lost']) + " (" + percentage(
                    mod_stat["maps_won"] / mod_stat["maps_played"]) + ")"
        else:
            mod_wr_str = "-"
        draw_std(345, row_pos, mod_wr_str)  #w/l (wr%)
        draw_std(548, row_pos,
                 comma_sep(mod_stat["average_score"]))  #average score
        draw_std(702, row_pos,
                 percentage(mod_stat["average_acc"]))  #average acc
        draw_std(840, row_pos, percentage(
            mod_stat["average_contrib"]))  #average contrib - unused for teams

    #pie chart
    #note: iterating over stat["by_mod"] works because dicts are insertion-ordered in python
    #since in db_manip we insert them in a certain order
    #otherwise the colors would be wrong if, for example, stat["by_mod"] returned the mod names
    #alphabetically ordered
    #you may want to hardcode the mod list instead of using stat["by_mod"] if the colors are jank
    if stat['maps_played'] != 0:
        data = [
            stat["by_mod"][mod_name]["maps_played"]
            for mod_name in stat["by_mod"]
        ]
        colors = ["#A5A5A5", "#FFC000", "#FF0000", "#00B0F0", "#92D050"]

        fig1, ax1 = plt.subplots(
            figsize=(3.5, 3.5))  #default is 100dpi, so 350px by 350px
        ax1.pie(data, colors=colors)
        ax1.axis('equal')

        #to binary and into pillow
        #https://stackoverflow.com/questions/8598673/how-to-save-a-pylab-figure-into-in-memory-file-which-can-be-read-into-pil-image/8598881
        plt_binary = io.BytesIO()
        plt.savefig(plt_binary, format='png', transparent=True)
        plt_binary.seek(0)
        plt_img = Image.open(plt_binary)

        #https://stackoverflow.com/questions/5324647/how-to-merge-a-transparent-png-image-with-another-image-using-pil
        #the alpha channel is used as the mask; transparent=True parameter actually saves as transparent
        img.paste(plt_img, (918, 382), plt_img)

    #you need to seek to 0 for it to work:
    #solution from here: #https://stackoverflow.com/questions/63209888/send-pillow-image-on-discord-without-saving-the-image
    #file-like object
    img_binary = io.BytesIO()
    img.save(img_binary, 'PNG')
    img_binary.seek(0)
    return img_binary
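
make_player_card (like the later image generators) calls percentage with a single value and draws the result straight onto the card, which suggests a display-formatting variant that returns a ready-to-print string rather than a number. A minimal sketch under the assumption that the cached stats are stored as 0-1 ratios (both the scaling and the two-decimal precision are guesses):

def percentage_str(ratio):
    # Hypothetical stand-in for the single-argument, display-oriented variant
    # used above; the real helper's scaling and precision are not shown here.
    return "{:.2f}%".format(ratio * 100)

# percentage_str(0.9732) -> '97.32%'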
Example No. 14
def format_posts(data):
    # data is a list of dicts
    text = ''
    text += "TOP POSTS: by Total Engaged Minutes" + sngl_line
    for rank, item in enumerate(data, start=1):
        headline = item['Title'].title().replace('’T', '’t').replace(
            '’S', '’s').replace("'S", "'s").replace('’M',
                                                    '’m').replace('’R', '’r')
        author = item['Authors'].title()
        section = item['Section']
        mv = str(
            round(float(item['Engaged minutes']) / float(item['Visitors']), 2))
        v = utils.humanize_number(item['Visitors'], 1)
        r = (utils.percentage(item['Returning vis.'], item['Visitors']))
        so = (utils.percentage(item['Social refs'], item['Views']))
        se = (utils.percentage(item['Search refs'], item['Views']))
        o = (utils.percentage(item['Other refs'], item['Views']))
        i = (utils.percentage(item['Internal refs'], item['Views']))
        di = (utils.percentage(item['Direct refs'], item['Views']))
        mo = (utils.percentage(item['Mobile views'], item['Views']))
        de = (utils.percentage(item['Desktop views'], item['Views']))
        ta = (utils.percentage(item['Tablet views'], item['Views']))
        fb = (utils.percentage(item['Fb refs'], item['Social refs']))
        tw = (utils.percentage(item['Tw refs'], item['Social refs']))
        inte = utils.humanize_number(item['Social interactions'], 1)

        text += f'''{rank}. {headline}
By {author} in {section}
VISITORS: {mv} min/visitor, visitors: {v}, returning: {r}%
TRAFFIC %: social {so}, search {se}, other {o}, direct {di}, internal {i}
SOCIAL BREAKDOWN %: FB {fb}, Twitter {tw} | Interactions: {inte}
DEVICES %: mobile {mo}, desktop {de}, tablet {ta}

-----------------------------------------------------------\n'''
    return text
Example No. 15
def format_site(data, unit, ma):
    # data is a dict
    text = ''
    print(data['postv']['new'])
    if len(data['postv']['new']) > 6:
        a = (utils.humanize_number(data['postv']['new'], 1)).rjust(5)
    else:
        a = (utils.humanize_number(data['postv']['new'], 0)).rjust(5)
    b = (data['postv']['delta']).rjust(5)
    b2 = (data['postv']['kpi_delta']).rjust(5)
    c = (data['postv']['kpi_new']).ljust(4)
    if len(data['visitors']['new']) > 6:
        d = (utils.humanize_number(data['visitors']['new'], 1)).rjust(5)
    else:
        d = (utils.humanize_number(data['visitors']['new'], 0)).rjust(5)
    e = (data['visitors']['delta']).rjust(5)
    if len(data['minutes']['new']) > 6:
        f = (utils.humanize_number(data['minutes']['new'], 1)).rjust(5)
    else:
        f = (utils.humanize_number(data['minutes']['new'], 0)).rjust(5)
    g = (data['minutes']['delta']).rjust(5)
    g2 = (data['minutes']['kpi_new_ma_delta']).rjust(5)
    h = (data['minutes']['kpi_new']).ljust(4)
    i = utils.percentage(data['postv']['new'], data['pagev']['new'])
    j = data['traffic']['fb%'].rjust(2)
    k = data['traffic']['fb_ma%'].rjust(2)
    l = data['visitor_type']['new'].rjust(2)
    m = data['visitor_type']['new_ma%'].rjust(2)
    n = data['traffic']['tco%'].rjust(2)
    o = data['traffic']['tco_ma%'].rjust(2)
    p = data['visitor_type']['returning'].rjust(2)
    q = data['visitor_type']['returning_ma%'].rjust(2)
    r = data['traffic']['search%'].rjust(2)
    s = data['traffic']['search_ma%'].rjust(2)
    t = data['traffic']['other%'].rjust(2)
    u = data['traffic']['other_ma%'].rjust(2)
    v = data['traffic']['direct%'].rjust(2)
    w = data['traffic']['direct_ma%'].rjust(2)
    x = data['traffic']['internal%'].rjust(2)
    y = data['traffic']['internal_ma%'].rjust(2)
    z = data['devices']['mobile%'].rjust(2)
    aa = data['devices']['mobile_ma%'].rjust(2)
    bb = data['devices']['desktop%'].rjust(2)
    cc = data['devices']['desktop_ma%'].rjust(2)
    dd = data['devices']['tablet%'].rjust(2)
    ee = data['devices']['tablet_ma%'].rjust(2)
    if len(data['pagev']['new']) > 6:
        ff = (utils.humanize_number(data['pagev']['new'], 1)).rjust(5)
    else:
        ff = (utils.humanize_number(data['pagev']['new'], 0)).rjust(5)
    gg = (data['pagev']['delta']).rjust(5)
    gg2 = (data['pagev']['kpi_delta']).rjust(5)
    hh = (data['pagev']['kpi_new']).ljust(4)
    if len(data['visitors']['new_pages']) > 6:
        ii = (utils.humanize_number(data['visitors']['new_pages'], 1)).rjust(5)
    else:
        ii = (utils.humanize_number(data['visitors']['new_pages'], 0)).rjust(5)
    jj = (data['visitors']['total_delta']).rjust(5)
    if len(data['minutes']['new_pages']) > 6:
        kk = (utils.humanize_number(data['minutes']['new_pages'], 1)).rjust(5)
    else:
        kk = (utils.humanize_number(data['minutes']['new_pages'], 0)).rjust(5)
    ll = (data['minutes']['total_delta']).rjust(5)
    ll2 = (data['minutes']['kpi_pages_ma_delta']).rjust(5)
    mm = (data['minutes']['kpi_pages']).ljust(4)
    oo = unit.ljust(5)
    nn = data['posts']['new']
    pp = data['posts']['kpi_delta']
    qq = data['traffic']['fb_pv_ma%'].rjust(5)
    rr = data['traffic']['tco_pv_ma%'].rjust(5)
    ss = data['traffic']['search_pv_ma%'].rjust(5)
    tt = data['traffic']['other_pv_ma%'].rjust(5)
    uu = data['traffic']['direct_pv_ma%'].rjust(5)
    vv = data['traffic']['internal_pv_ma%'].rjust(5)
    ww = data['traffic']['fb_pv_diff'].rjust(6)
    xx = data['traffic']['tco_pv_diff'].rjust(6)
    yy = data['traffic']['search_pv_diff'].rjust(6)
    zz = data['traffic']['other_pv_diff'].rjust(6)
    aaa = data['traffic']['direct_pv_diff'].rjust(6)
    bbb = data['traffic']['internal_pv_diff'].rjust(6)

    text += f'''
=====================================
SITE       Posts   vs  | Pages   vs
DETAILS:    LW     MA% |  LW     MA%
-----------------------+-------------
Views     {a}  {b}   {ff}  {gg} 
Visitors  {d}  {e}   {ii}  {jj}
Minutes   {f}  {g}   {kk}  {ll}
-------------------------------------
PV/V/Day average this month: X.X
-------------------------------------
BOUNCE RATE: XX.X% home page
-------------------------------------
New posts: {nn}, vs MA%: {pp}
-------------------------------------
* Post views were {i}% of period's total page views
====================================
POST TRAFFIC:
As % :     LW  MA  |   ΔPV   vs MA
-------------------+---------------
Facebook   {j}  {k}  |  {ww}  {qq}
Twitter    {n}  {o}  |  {xx}  {rr}
Search     {r}  {s}  |  {yy}  {ss}
Other      {t}  {u}  |  {zz}  {tt}
Direct     {v}  {w}  |  {aaa}  {uu}
Internal   {x}  {y}  |  {bbb}  {vv}
=====================================
VISITORS:  LW  MA  | DEVICES: LW  MA
-------------------+-----------------
New        {l}  {m}  | Mobile   {z}  {aa}
Returning  {p}  {q}  | Desktop  {bb}  {cc}
                   | Tablet   {dd}  {ee}
=====================================
MA = moving average (prior 13 weeks)
Due to rounding, numbers may not add up to 100%
Google accounts for nearly all 'Search' views.
Google News, APIs account for most 'Other' views.
===================================================
RECENCY:
-------------------------------------------
   Return frequency      % of visits
-------------------------------------------

===================================================
GEO LOCATION: ... as % of page views
----------------------+--------------------
   Cities             |    Regions
----------------------+--------------------

'''
    return text
Example No. 16
def format_score(score, max_score) -> str:
    percent = percentage(score, max_score)
    return f"{score:.1f} / {max_score:.1f} ({percent:.1f}%)"
Example No. 17
async def make_map_best(score_docs, current_page, max_page, invoker_doc=None):
    """Generate and return a map leaderboard (as a discord.py-compatible image).
    
    - `score_docs` is an array of score documents, 10 or fewer, of the map's scores.
    This should be found through `db_get.get_top_map_scores()` prior to calling this function.
    - `current_page` is the page from `db_get`.
    - `max_page` is also the page from `db_get`.
    - `invoker_doc` should be the User document of the invoker if player is set.
    This calls a `db_get` function.
    
    `current_page` and `max_page` are used solely for the page indicator in the upper-left of
    the image."""
    def draw_std(x, y, text, font="m"):
        # draw centered white text at (x, y) using the given font key
        draw.text((x, y),
                  str(text), (255, 255, 255),
                  font=fonts[font],
                  align='center',
                  anchor="mm")

    def to_standard_size(img):
        """Resize the image to 1280x720px. Unused."""
        width, height = img.size
        multiplier = max(1280 / width, 720 / height)
        resized = img.resize(
            (int(width * multiplier), int(height * multiplier)))
        cropped = resized.crop((0, 0, 1280, 720))
        return cropped

    def to_banner_size(img):
        """Resize banner to take up full width of the main image"""
        width, height = img.size
        multiplier = 1280 / width
        resized = img.resize(
            (int(width * multiplier), int(height * multiplier)))
        return resized

    def apply_gradient(img, gradient_start=0.3, gradient_duration=0.2):
        """Apply transparency gradient.

        - `gradient_start` should be between 0 and 1
        - `gradient_duration` should also be between 0 and 1 where
        `gradient_start+gradient_duration <= 1`, but other values do work
        
        from https://stackoverflow.com/questions/40948710/vertically-fade-an-image-with-transparent-background-to-transparency-using-pytho
        https://stackoverflow.com/questions/19235664/vertically-fade-image-to-transparency-using-python-pil-library/19235788#19235788"""
        im = img
        width, height = im.size
        pixels = im.load()
        for y in range(height):
            for x in range(width):
                initial_alpha = pixels[x, y][
                    3]  #iterating over every pixel, an rgba tuple (r,g,b,a)
                #take current pixel height and subtract by the complete height*gradient_start
                #height*gradient_start represents the pixel at which we start changing the opacity
                #if this value is negative, then the alpha remains for this pixel (as we are not at the gradient_start yet, and the output alpha is > initial_alpha)
                #if the value is nonnegative it represents the number of pixels after the gradient start
                #this is then divided by height to yield a decimal representing how far into the gradient duration we are
                #if this value is further than the gradient duration itself, then alpha evaluates to <0 and so we have already
                #passed the end of the gradient
                #since gradient duration is relative to the full image (not just the gradient part)
                #that is then divided by the gradient duration itself to yield an opacity multiplier
                #and we finally multiply by 255 to get the final opacity
                alpha = initial_alpha - int((y - height * gradient_start) /
                                            height / gradient_duration * 255)
                if alpha <= 0:
                    alpha = 0
                pixels[x, y] = pixels[x, y][:3] + (alpha,
                                                   )  #get rgb and append alpha
        for y in range(y, height):
            for x in range(width):
                pixels[x, y] = pixels[x, y][:3] + (0, )
        return im

    map_doc = await db_get.get_map_document(score_docs[0]["diff_id"])

    base_fp = "src/static/bg-std.png"
    base_img = Image.open(base_fp, mode='r')
    #base_img = Image.new("RGBA", (1280, 720), color="#000000")
    #base_img = Image.new("RGBA", (1280, 720))
    draw = ImageDraw.Draw(base_img)

    banner_fp = await image_handling.get_banner_fp(map_doc["set_id"])
    banner_img = Image.open(banner_fp, mode='r')
    banner_img = to_banner_size(banner_img)
    enhancer = ImageEnhance.Brightness(banner_img)
    banner_img_darkened = enhancer.enhance(0.45).convert("RGBA")
    banner_final = apply_gradient(banner_img_darkened)
    base_img.paste(banner_final, (0, 0), banner_final)

    grid_fp = "src/static/maplb-grid-base.png"
    #https://stackoverflow.com/questions/31273592/valueerror-bad-transparency-mask-when-pasting-one-image-onto-another-with-pyt
    grid_img = Image.open(grid_fp, mode='r').convert("RGBA")
    base_img.paste(grid_img, (0, 0), grid_img)

    #header
    draw_std(640, 65, "Top Scores", "l")  #static
    meta = map_doc["meta"]
    full_name = meta["map_artist"] + " - " + meta["map_song"] + " [" + meta[
        "map_diff"] + "]"
    draw_std(640, 105, full_name)  #full name

    #page number
    draw.text((36, 137),
              f"(page {current_page} of {max_page})", (255, 255, 255),
              font=fonts["s"],
              align='left',
              anchor="lm")

    #table
    #x-dists: 70,116(left),266,876,1035,1172
    #y-dist: 39 each row
    for row, score in enumerate(score_docs):
        y_pos = (row * 39) + 216
        draw_std(54, y_pos,
                 (current_page - 1) * 10 + row + 1)  #numerical ranking
        draw_std(214, y_pos, score["user_name"])  #player name
        draw_std(417, y_pos, comma_sep(score["score"]))  #score
        draw_std(561, y_pos, percentage(score["accuracy"]))  #acc
        hits = (
            f"{comma_sep(score['hits']['300_count'])}/{comma_sep(score['hits']['100_count'])}/"
            f"{comma_sep(score['hits']['50_count'])}/{comma_sep(score['hits']['miss_count'])}"
        )
        draw_std(722, y_pos, hits)  #hits
        draw_std(881, y_pos, comma_sep(score["combo"]) + "x")  #combo

    if invoker_doc["osu_id"]:
        #get player, get best score, check if rank of best score is already on this page
        #if not, do everything below
        score, rank, extra_count = await db_get.get_best_user_score(
            score_docs[0]["diff_id"], invoker_doc["osu_id"])
        if math.floor(rank / 10) != current_page - 1:
            y_pos = 645
            draw_std(54, y_pos - 39, "...")  #ellipsis
            draw_std(54, y_pos, rank)  #numerical ranking
            draw_std(214, y_pos, invoker_doc["osu_name"])  #player name
            draw_std(417, y_pos, comma_sep(score["score"]))  #score
            draw_std(561, y_pos, percentage(score["accuracy"]))  #acc
            hits = (
                f"{comma_sep(score['hits']['300_count'])}/{comma_sep(score['hits']['100_count'])}/"
                f"{comma_sep(score['hits']['50_count'])}/{comma_sep(score['hits']['miss_count'])}"
            )
            draw_std(722, y_pos, hits)  #hits
            draw_std(881, y_pos, comma_sep(score["combo"]) + "x")  #combo
            if extra_count > 0:
                draw.text((54, y_pos + 39),
                          f"(+{extra_count} more)", (255, 255, 255),
                          font=fonts["s"],
                          align='left',
                          anchor="lm")

    #i guess you have to seek before you actually do the thing
    #solution from here: #https://stackoverflow.com/questions/63209888/send-pillow-image-on-discord-without-saving-the-image
    img_binary = io.BytesIO()
    base_img.save(img_binary, 'PNG')
    img_binary.seek(0)
    return img_binary
Example No. 18
async def make_server_best(score_docs,
                           current_page,
                           max_page,
                           mod_filter=None,
                           category="score"):
    """Generate and return the best scores of the tournament as an image.
    
    - `score_docs` is an array of score documents, 10 or fewer, of the best scores.
    This should be found through `db_get.get_top_team_scores()` prior to calling this function.
    - `current_page` is the page from `db_get`.
    - `max_page` is also the page from `db_get`.
    - `mod_filter` is the mod these documents are filtered by, if applicable.
    - `category` is the leaderboard category these documents are sorted by, if applicable.
    
    `current_page` and `max_page` are used solely for the page indicator in the upper-left of
    the image."""
    def draw_std(x, y, text, font="m"):
        # draw centered white text at (x, y) using the given font key
        draw.text((x, y),
                  str(text), (255, 255, 255),
                  font=fonts[font],
                  align='center',
                  anchor="mm")

    def truncate(text, font="m"):
        """Truncates long strings to the desired max width and adds an ellipsis if needed."""
        max_width = 373

        font = fonts[font]
        ellipsis_width, _ = font.getsize("...")

        width, _ = font.getsize(text)
        if width > max_width:
            while width > (max_width - ellipsis_width):
                text = text[:-1]
                width, _ = font.getsize(text)
            text += "..."
        return text

    # largely duplicated from the team-best renderer

    # Column order depends on the sort category: this dict holds the table
    # headers, and a matching per-score dict below supplies values in the same order.
    header_order = {
        "score": ["Score", "Acc", "Contrib"],
        "acc": ["Acc", "Score", "Contrib"],
        "contrib": ["Contrib", "Score", "Acc"]
    }

    player_card_base_img_fp = "src/static/serverbest.png"
    img = Image.open(player_card_base_img_fp, mode='r')

    draw = ImageDraw.Draw(img)

    #header
    draw_std(640, 65, f"Top Scores - {header_order[category][0]}",
             "l")  #static

    #table header
    header_font = ImageFont.truetype("src/static/Renogare-Regular.otf", 25)
    draw.text((916, 177),
              header_order[category][0], (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")
    draw.text((1046, 177),
              header_order[category][1], (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")
    draw.text((1176, 177),
              header_order[category][2], (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")

    #page number
    page_text = f"(page {current_page} of {max_page})"
    if mod_filter:
        page_text += f" ({mod_filter})"
    draw.text((36, 137),
              page_text, (255, 255, 255),
              font=fonts["s"],
              align='left',
              anchor="lm")

    colors = {
        "NM": (165, 165, 165),
        "HD": (255, 192, 0),
        "HR": (255, 0, 0),
        "DT": (0, 176, 240),
        "FM": (146, 208, 80),
        "TB": (146, 208, 80)
    }
    #table
    #x-dists: 70,116(left),266,876,1035,1172
    #y-dist: 39 each row
    for row, score in enumerate(score_docs):
        map_doc = await db_get.get_map_document(score["diff_id"])

        header_order = {
            "score": [
                comma_sep(score["score"]),
                percentage(score["accuracy"]),
                percentage(score["contrib"])
            ],
            "acc": [
                percentage(score["accuracy"]),
                comma_sep(score["score"]),
                percentage(score["contrib"])
            ],
            "contrib": [
                percentage(score["contrib"]),
                comma_sep(score["score"]),
                percentage(score["accuracy"])
            ]
        }

        banner_fp = await image_handling.get_banner_fp(map_doc["set_id"])
        banner = Image.open(banner_fp, mode='r')
        banner = banner.resize((139, 37))

        y_pos = (row * 39) + 216

        draw_std(55, y_pos,
                 (current_page - 1) * 10 + row + 1)  #numerical ranking
        draw_std(205, y_pos, score["user_name"])  #player name
        #tuple refers to top-left corner, so half the banner's height is subtracted
        img.paste(banner, (321, y_pos - 19))  #map banner
        draw.line([461, y_pos - 19, 461, y_pos + 19],
                  colors[score["map_type"]], 5)  #modline
        meta = map_doc["meta"]
        full_name = meta["map_artist"] + " - " + meta[
            "map_song"] + " [" + meta["map_diff"] + "]"
        draw.text((471, y_pos),
                  truncate(full_name, "s"), (255, 255, 255),
                  font=fonts["s"],
                  align='left',
                  anchor="lm")  #map name
        draw_std(916, y_pos, header_order[category][0], "s")
        draw_std(1046, y_pos, header_order[category][1], "s")
        draw_std(1176, y_pos, header_order[category][2], "s")

    img_binary = io.BytesIO()
    img.save(img_binary, 'PNG')
    img_binary.seek(0)
    return img_binary
Example No. 19
async def make_player_best(score_docs,
                           current_page,
                           max_page,
                           mod_filter=None):
    """Generate and return a player score leaderboard (as a discord.py-compatible image).
    
    - `score_docs` is an array of score documents, 10 or fewer, of the player's scores.
    This should be found through `db_get.get_top_player_scores()` prior to calling this function.
    (This allows for mod filtering at the command level.)
    - `current_page` is the page from `db_get`.
    - `max_page` is also the page from `db_get`.
    - `mod_filter` is the mod these documents are filtered by, if applicable.
    
    `current_page` and `max_page` are used solely for the page indicator in the upper-left of
    the image."""
    def draw_std(x, y, text, font="m"):
        # draw centered white text at (x, y) using the given font key
        draw.text((x, y),
                  str(text), (255, 255, 255),
                  font=fonts[font],
                  align='center',
                  anchor="mm")

    def truncate(text, font="m"):
        """Truncates long strings to the desired max width and adds an ellipsis if needed."""
        max_width = 510

        font = fonts[font]
        ellipsis_width, _ = font.getsize("...")

        width, _ = font.getsize(text)
        if width > max_width:
            while width > (max_width - ellipsis_width):
                #repeatedly cut off characters until we can shove the ellipsis in
                text = text[:-1]
                width, _ = font.getsize(text)
            text += "..."
        return text

    player_card_base_img_fp = "src/static/playerbest.png"
    img = Image.open(player_card_base_img_fp, mode='r')

    draw = ImageDraw.Draw(img)

    #header
    player_doc = await db_get.get_player_document(score_docs[0]["user_id"])
    draw_std(640, 65, player_doc["user_name"], "l")  #player
    draw_std(640, 105, player_doc["team_name"])  #team name

    #page number
    page_text = f"(page {current_page} of {max_page})"
    if mod_filter:
        page_text += f" ({mod_filter})"
    draw.text((36, 137),
              page_text, (255, 255, 255),
              font=fonts["s"],
              align='left',
              anchor="lm")

    colors = {
        "NM": (165, 165, 165),
        "HD": (255, 192, 0),
        "HR": (255, 0, 0),
        "DT": (0, 176, 240),
        "FM": (146, 208, 80),
        "TB": (146, 208, 80)
    }
    #table
    #x-dists: 70,116(left),266,876,1035,1172
    #y-dist: 39 each row, starting from 216
    for row, score in enumerate(score_docs):
        map_doc = await db_get.get_map_document(score["diff_id"])

        banner_fp = await image_handling.get_banner_fp(map_doc["set_id"])
        banner = Image.open(banner_fp, mode='r')
        banner = banner.resize((139, 37))

        y_pos = (row * 39) + 216

        draw_std(70, y_pos,
                 (current_page - 1) * 10 + row + 1)  #numerical ranking

        #tuple refers to top-left corner, so half the banner's height is subtracted
        img.paste(banner, (117, y_pos - 19))  #map banner
        draw.line([257, y_pos - 19, 257, y_pos + 19],
                  colors[score["map_type"]], 5)  #modline
        meta = map_doc["meta"]
        full_name = meta["map_artist"] + " - " + meta[
            "map_song"] + " [" + meta["map_diff"] + "]"
        draw.text((267, y_pos),
                  truncate(full_name, "s"), (255, 255, 255),
                  font=fonts["s"],
                  align='left',
                  anchor="lm")  #map name

        draw_std(876, y_pos, comma_sep(score["score"]))  #score
        draw_std(1035, y_pos, percentage(score["accuracy"]))  #acc
        draw_std(1172, y_pos, comma_sep(score["combo"]) + "x")  #combo

    img_binary = io.BytesIO()
    img.save(img_binary, 'PNG')
    img_binary.seek(0)
    return img_binary
Example No. 20
async def make_averaget_best(team_docs,
                             current_page,
                             max_page,
                             category,
                             invoker_doc=None):
    """Generate and return the best teams of the tournament in a certain category as an image.
    
    - `team_docs` is an array of team documents, 10 or fewer.
    This should be found through `db_get.get_top_team_scores()` prior to calling this function.
    - `current_page` is the page from `db_get`.
    - `max_page` is also the page from `db_get`.
    - `category` should be the leaderboard category.
    - `invoker_doc` should be the User document of the command invoker if player is set.
    Calls a `db_get` function.
    
    `current_page` and `max_page` are used solely for the page indicator in the upper-left of
    the image."""
    def draw_std(x, y, text, font="m"):
        # draw centered white text at (x, y) using the given font key
        draw.text((x, y),
                  str(text), (255, 255, 255),
                  font=fonts[font],
                  align='center',
                  anchor="mm")

    headers = {
        "score": ["Score", "Acc"],
        "acc": ["Acc", "Score"],
    }

    base_img_fp = "src/static/averagelb.png"
    img = Image.open(base_img_fp, mode='r')

    draw = ImageDraw.Draw(img)

    #header
    draw_std(640, 65, f"Best Teams - {headers[category][0]}", "l")  #team name

    #page number
    page_text = f"(page {current_page} of {max_page})"
    draw.text((36, 137),
              page_text, (255, 255, 255),
              font=fonts["s"],
              align='left',
              anchor="lm")

    #table header
    header_font = ImageFont.truetype("src/static/Renogare-Regular.otf", 25)
    draw.text((259, 177),
              "Team", (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")
    draw.text((487, 177),
              headers[category][0], (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")
    draw.text((648, 177),
              headers[category][1], (255, 255, 255),
              font=header_font,
              align='center',
              anchor="mm")

    #table
    #x-dists: 70,116(left),266,876,1035,1172
    #y-dist: 39 each row
    for row, team in enumerate(team_docs):
        stat = team["cached"]
        header_order = {
            "score": [
                comma_sep(stat["average_score"]),
                percentage(stat["average_acc"])
            ],
            "acc": [
                percentage(stat["average_acc"]),
                comma_sep(stat["average_score"])
            ]
        }

        y_pos = (row * 39) + 216
        draw_std(62, y_pos,
                 (current_page - 1) * 10 + row + 1)  #numerical ranking
        draw_std(259, y_pos, team["_id"])  #player name
        draw_std(487, y_pos, header_order[category][0])
        draw_std(648, y_pos, header_order[category][1])

    if invoker_doc["osu_id"]:
        team_doc = await db_get.get_team_document(invoker_doc["team_name"])
        stat = team_doc["cached"]
        header_order = {
            "score": [
                comma_sep(stat["average_score"]),
                percentage(stat["average_acc"])
            ],
            "acc": [
                percentage(stat["average_acc"]),
                comma_sep(stat["average_score"])
            ],
        }
        invoker_rank = {
            "score": stat["score_rank"],
            "acc": stat["acc_rank"],
        }
        if math.floor(invoker_rank[category] / 10) != current_page - 1:
            y_pos = 645
            draw_std(62, y_pos - 39, "...")  #ellipsis
            draw_std(62, y_pos, invoker_rank[category])  #numerical ranking
            draw_std(259, y_pos, team_doc["_id"])  #player name
            draw_std(487, y_pos, header_order[category][0])
            draw_std(648, y_pos, header_order[category][1])

    #i guess you have to seek before you actually do the thing
    #solution from here: #https://stackoverflow.com/questions/63209888/send-pillow-image-on-discord-without-saving-the-image
    img_binary = io.BytesIO()
    img.save(img_binary, 'PNG')
    img_binary.seek(0)
    return img_binary
Example No. 21
def test_percentage():
    assert percentage(0, 0) == 100
    assert percentage(1, 2) == 50
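
test_percentage pins down the behaviour of the two-argument form used throughout the crawler, grading, and reporting snippets: a zero denominator yields 100 instead of raising ZeroDivisionError. A sketch consistent with those two assertions (the projects above clearly ship their own implementations, and some callers format the result with '{:d}', so the integer return via round() is an assumption):

def percentage(part, whole):
    # Sketch matching test_percentage: percentage(0, 0) == 100 and
    # percentage(1, 2) == 50. Whether the real helper rounds, truncates,
    # or returns a float is not shown in the source.
    if whole == 0:
        return 100
    return round(100 * part / whole)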