def dummy_backup_db(dbx, dirname):
    """ Primitive backup of the db into directory dirname """
    print("(dummy) database backup to %s" % dirname)
    ensure_dir(dirname)
    s = func.clsMyStat(dbx, '')
    for stat in s.getAllStats():
        stat_object = func.clsMyStat(dbx, stat)
        r = stat_object.getLastValues(0)
        r = ["\t".join(('{0:%d.%m.%Y}'.format(x[0]), str(x[1]))) for x in r]
        func.writefile("\n".join(r), "%s/%s" % (dirname, stat))
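# ensure_dir() is used above but not defined in this section; the block below
# is only a minimal sketch of the behaviour assumed here (make sure the target
# directory exists before files are written into it) and acts purely as a
# fallback when the real helper is not defined elsewhere in this module.
if 'ensure_dir' not in globals():
    import os

    def ensure_dir(dirname):
        # create the directory (and any missing parents) if it does not exist
        os.makedirs(dirname, exist_ok=True)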
def make_csv(dbx, dirname):
    """ Dump all statistics as CSV files into directory dirname """
    ensure_dir(dirname)
    s = func.clsMyStat(dbx, '')
    for stat in s.getAllStats():
        print(stat)
        r = func.clsMyStat(dbx, stat).getLastValues(0)
        csv_rows = [
            "%s;%s;%s;" % (stat, "{:%d.%m.%Y}".format(x[0]), x[1]) for x in r
        ]
        func.writefile("stat_id;date;value;\n" + "\n".join(csv_rows),
                       "%s/%s.csv" % (dirname, stat))
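# Illustration only: the row layout make_csv writes, shown on an invented
# two-point series (the real rows come from clsMyStat.getLastValues(0)); the
# _demo_csv_layout name is hypothetical and not part of the original module.
def _demo_csv_layout():
    import datetime
    sample = [(datetime.date(2024, 1, 31), 120), (datetime.date(2024, 2, 1), 125)]
    rows = ["%s;%s;%s;" % ("DEMO_STAT", "{:%d.%m.%Y}".format(x[0]), x[1])
            for x in sample]
    # stat_id;date;value;
    # DEMO_STAT;31.01.2024;120;
    # DEMO_STAT;01.02.2024;125;
    return "stat_id;date;value;\n" + "\n".join(rows)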
def make_pages(dbx, dirname):
    """ Generate the pages and images into directory dirname """

    def add_stat_to_group(groups, groupname, statid):
        try:
            groups[groupname].append(statid)
        except KeyError:
            groups[groupname] = [statid]

    def stat_min_date(stat):
        ''' return the smallest date in the data series of a statistic,
            stat = [ (date, value), (date, value), ... ] '''
        return min(func.lmap(lambda x: x[0], stat)) if stat else None

    def stat_max_date(stat):
        ''' likewise, return the largest date '''
        return max(func.lmap(lambda x: x[0], stat)) if stat else None

    # prepare the output directory
    try:
        shutil.rmtree(dirname)
    except:
        pass
    try:
        func.makedir(dirname)
    except:
        pass
    try:
        func.makedir(dirname + "/img")
    except:
        pass

    s = func.clsMyStat(dbx, '')
    stats = s.getAllStats()
    i, statnames, statnames_index, groups = 0, {}, {}, {}

    # build the list of all graphs to be generated:
    mixed_graphs = {}

    # add the automatically generated list of the most active twitter accounts
    best_twitters = {}
    for stat in stats:
        if re.search(r'TWITTER_(.+?)_TWEETS', stat):
            mystat = Stat(stat, get_stat_for_graph(dbx, stat))
            best_twitters[stat] = mystat.max()
    sorted_twitters = sorted(best_twitters.items(),
                             key=operator.itemgetter(1))[-7:]
    stat_id = 'BEST_TWITTERS'
    mixed_graphs[stat_id] = [x[0] for x in sorted_twitters]
    add_stat_to_group(groups, 'Porovnání', stat_id)

    # 1) load the graphs defined in the configuration, turn them into a hash table
    for line in func.getconfig('config/graphs'):
        lineparts = func.lmap(str.strip, line.split(' '))
        mixed_graphs[lineparts[0]] = lineparts[1:]
        statnames[lineparts[0]] = lineparts[0]
        add_stat_to_group(groups, 'Porovnání', lineparts[0])

    # 2) add the automatically created combined Twitter graphs
    #    (TWEETS, FOLLOWERS and LIKES)
    for stat in stats:
        found = re.search(r'TWITTER_(.+?)_TWEETS', stat)
        if found:
            statid = "TWITTER_%s" % found.group(1)
            mixed_graphs[statid] = [
                stat,
                "TWITTER_%s_FOLLOWERS" % found.group(1),
                "TWITTER_%s_LIKES" % found.group(1)
            ]
            statnames[statid] = "Twitter %s" % found.group(1)  # default name
            statnames_index[statid] = "%s" % found.group(1)  # default name for the front page
            add_stat_to_group(groups, 'Twitteři', statid)

    # 3) add all remaining statistics, skipping the Twitter ones;
    #    build somewhat ad hoc default names
    for stat in stats:
        if not re.search(r'TWITTER_(.+)', stat):
            mixed_graphs[stat] = [stat]

            found = re.search(r'BALANCE_(.+)', stat)
            if found:
                statnames[stat] = "Zůstatek %s" % found.group(1)
                add_stat_to_group(groups, 'Finance', stat)
                continue

            found = re.search(r'PI_MEMBERS_(.+)', stat)
            if found:
                statnames[stat] = "Počet členů %s" % found.group(1)
                add_stat_to_group(groups, 'Členové', stat)
                continue

            found = re.search(r'YOUTUBE_(.+)', stat)
            if found:
                statnames[stat] = "Youtube %s" % found.group(1)
                add_stat_to_group(groups, 'Youtube', stat)
                continue

            found = re.search(r'PP_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Finanční tým', stat)
                continue

            found = re.search(r'REDMINE_(.+)', stat)
            if found:
                add_stat_to_group(groups, 'Odbory a složky strany na Redmine', stat)
                continue

            add_stat_to_group(groups, 'Ostatní', stat)

    # load additional statistic names from the configuration
    for line in func.getconfig('config/statnames'):
        try:
            (a, b) = line.split('\t', 2)
            statnames[a] = b
        except ValueError:
            pass

    # front page & assets
    mybody = ""
    for groupname in groups:
        paragraph = []
        for statid in groups[groupname]:
            if statid in statnames_index.keys():
                statname = statnames_index[statid]
            elif statid in statnames.keys():
                statname = statnames[statid]
            else:
                statname = statid
            paragraph.append(html.a("%s.delta.htm" % statid, statname))
        paragraph.sort()
        mybody += html.h2(groupname) + html.p(",\n".join(paragraph))

    page = func.replace_all(
        func.readfile('templates/index.htm'), {
            '%body%': mybody,
            '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(datetime.datetime.now())
        })
    func.writefile(page, "%s/index.htm" % dirname)
    shutil.copytree('templates/assets', "%s/assets" % dirname)

    # create all combined graphs, skipping statistics with at most one value
    for statid in mixed_graphs:
        if arg('s') and statid != arg('s'):
            continue
        i += 1

        # graph data
        involved_stats, involved_deltas = {}, {}
        statInstances = []
        for invstat in mixed_graphs[statid]:
            tmpstat = get_stat_for_graph(dbx, invstat)
            involved_stats[invstat] = tmpstat
            statInstances.append(Stat(invstat, involved_stats[invstat]))

            # compute the delta statistic
            deltastat, lastvalue = [], None
            for entry in tmpstat:
                deltastat.append([
                    entry[0], 0 if lastvalue is None else entry[1] - lastvalue
                ])
                lastvalue = entry[1]
            involved_deltas[invstat] = deltastat

        singlestat = (len(involved_stats.values()) == 1)

        # involved_stats must contain at least one series with >= 1 values
        if max(func.lmap(len, involved_stats.values())) > 0:
            print("[%s/%s]: Creating %s \r" % (i, len(mixed_graphs), statid),
                  end='\r')

            # basic and delta graph
            make_graph(involved_stats, "%s/img/%s.png" % (dirname, statid),
                       delta=False)
            make_graph(involved_deltas, "%s/img/%s.delta.png" % (dirname, statid),
                       delta=True)

            # data acquisition methods
            method_list = ""
            for stat in involved_stats:
                try:
                    desc = involved_stats[stat][-1:][0][2]
                except IndexError:
                    desc = "Neznámá metoda"
                method_list += "%s: %s<br>" % (stat, desc)

            # html page
            statname = statnames[statid] if statid in statnames.keys() else statid
            # date range
            min_date = min(func.lmap(stat_min_date,
                                     filter(lambda x: x, involved_stats.values())))
            max_date = max(func.lmap(stat_max_date,
                                     filter(lambda x: x, involved_stats.values())))
            bottom_links = html.h2("Metody získání dat") + \
                html.p("Vypsána je vždy poslední použitá metoda, úplný seznam je v CSV souboru." + html.br()*2 + method_list) + \
                ((html.a("%s.csv" % statid, "Zdrojová data ve formátu CSV") + html.br()) if singlestat else "") + \
                html.a("index.htm", "Všechny metriky")
            try:
                min_value = str(min(map(lambda x: x.min(), statInstances)))
            except TypeError:
                min_value = '-'
            try:
                max_value = str(max(map(lambda x: x.max(), statInstances)))
            except TypeError:
                max_value = '-'
            common_replaces = {
                '%stat_name%': statname,
                '%stat_desc%': '',
                '%stat_id%': statid,
                '%stat_date%': '{0:%d.%m.%Y %H:%M:%S}'.format(datetime.datetime.now()),
                '%bottomlinks%': bottom_links,
                '%daterange%': '%s - %s' % (min_date, max_date),
                '%max%': max_value,
                '%min%': min_value
            }

            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(common_replaces, {
                    '%stat_image%': "img/%s.png" % statid,
                    '%stat_type%': "Absolutní hodnoty"
                }))
            func.writefile(page, "%s/%s.htm" % (dirname, statid))

            page = func.replace_all(
                func.readfile('templates/stat.htm'),
                merge_dicts(common_replaces, {
                    '%stat_image%': "img/%s.delta.png" % statid,
                    '%stat_type%': "Denní přírůstky (delta)"
                }))
            func.writefile(page, "%s/%s.delta.htm" % (dirname, statid))

            # create a CSV file with the source data
            if singlestat:
                csv_rows = [
                    "%s;%s;%s;%s;" % (statid, "{:%d.%m.%Y}".format(x[0]), x[1], x[2])
                    for x in list(involved_stats.values())[0]
                ]
                func.writefile(
                    "stat_id;date;value;method;\n" + "\n".join(csv_rows),
                    "%s/%s.csv" % (dirname, statid))
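# merge_dicts() is used by make_pages above but not defined in this section;
# the block below is only a sketch of the behaviour assumed by the template
# rendering (a new dict in which later arguments override earlier ones on key
# collisions) and acts purely as a fallback when the real helper is not
# defined elsewhere in this module.
if 'merge_dicts' not in globals():
    def merge_dicts(*dicts):
        merged = {}
        for d in dicts:
            merged.update(d)  # later dicts win on duplicate keys
        return merged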