Example #1
def coverage_chart(request):
    boundary_id = request.GET.get('id')
    admin_level = request.GET.get('admin_level')

    chart = pygal.DateLine(x_label_rotation=90,
                           range=(0, 100),
                           fill=True,
                           style=LightStyle)

    today = date.today()

    if boundary_id:
        coverage_boundary = get_object_or_404(CoverageBoundary, pk=boundary_id)
        coverage_scores = CoverageScore.objects.filter(
            coverage_boundary_id=boundary_id).order_by('date')

        values = []

        last = len(coverage_scores) - 1
        for i, coverage_score in enumerate(coverage_scores):
            values.append(
                (coverage_score.date, round(coverage_score.coverage, 1)))

            if i == last and coverage_score.date != today:
                values.append((today, round(coverage_score.coverage, 1)))

        if coverage_boundary.abbreviation is not None:
            chart.add(coverage_boundary.abbreviation, values)
        else:
            chart.add(coverage_boundary.name, values)
    elif admin_level:
        coverage_boundaries = CoverageBoundary.objects.filter(
            admin_level=admin_level).order_by('rank')[:10]

        for coverage_boundary in coverage_boundaries:
            coverage_scores = CoverageScore.objects.filter(
                coverage_boundary_id=coverage_boundary.id)

            values = []

            last = len(coverage_scores) - 1
            for i, coverage_score in enumerate(coverage_scores):
                values.append(
                    (coverage_score.date, round(coverage_score.coverage, 1)))

                if i == last and coverage_score.date != today:
                    values.append((today, round(coverage_score.coverage, 1)))

            if coverage_boundary.abbreviation is not None:
                chart.add(coverage_boundary.abbreviation, values)
            else:
                chart.add(coverage_boundary.name, values)

    return HttpResponse(chart.render(), content_type="image/svg+xml")
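Note: both branches above build the series the same way (including padding the last
point out to today). A minimal helper sketch, assuming the same CoverageScore fields,
could factor that out:

def coverage_values(coverage_scores, today):
    # Build (date, rounded coverage) pairs; assumes the queryset is ordered by date.
    values = [(s.date, round(s.coverage, 1)) for s in coverage_scores]
    # Extend the last known value to today so the line reaches the chart edge.
    if values and values[-1][0] != today:
        values.append((today, values[-1][1]))
    return values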
Example #2
def main(city, dayToPlot, recently, thisDayInHistory, field):
    dailyData = daily.load(city)
    dailyDataClean(dailyData)
    if dayToPlot is None:
        dayToPlot = datetime.date.today()
    if recently:
        historyLines = daysRecently(dailyData, dayToPlot, field)
    else:
        historyLines = daysToday(dailyData, dayToPlot, field)

    if len(historyLines) == 1: #Most ever
        startDate = dailyData.firstDateWithValue(field.index)
    else:
        startDate = historyLines[1].date
        # - datetime.timedelta(days=(historyLines[0].date - historyLines[1].date).days * .1)
    endDate = historyLines[0].date + datetime.timedelta(days=1)
    plotData = []
    for date in daily.dayRange(startDate,
                               endDate):
        values = dailyData.get(date, None)
        if values is None:
            continue
        plotData.append((date, values[field.index]))
    style = pygal.style.Style(label_font_size=15, major_label_font_size=20)
    date_chart = pygal.DateLine(style=style,
                                print_values=True,
                                #truncate_label=1000,
                                x_label_rotation=80)
    date_chart.y_title = '{} ({})'.format(field.englishName,
                                          field.units)
    if field.minValue == 0:
        date_chart.add(None, plotData, show_dots=False, fill=True)
    else:
        date_chart.add(None, plotData, show_dots=False)

    labelByValue = {}
    for history in historyLines[0:2]:
        labelByValue[float(history.val)] = history.label
        date_chart.add(None,
                       ((startDate, history.val),
                        (historyLines[0].date, history.val)),
                       formatter=lambda t: labelByValue[t[1]])

    historyType = ''
    if thisDayInHistory:
        historyType = 'thisDayInHistory'
    if recently:
        historyType = 'recently'
    fname = '{city}/{dayToPlot}.recordSince.{historyType}.png'.format(**locals())
    date_chart.render_to_png(fname,
                             width=1024, height=768)
    return fname
Example #3
def graficar_date_diagrama(list_consumos):

    dateline = pygal.DateLine(x_label_rotation=25)
    lista_labels = []
    lista_informacion_consumo = []
    for consumo in list_consumos:
        lista_labels.append(datetime.timestamp(consumo.date))
        lista_informacion_consumo.append(
            (datetime.timestamp(consumo.date), consumo.quantity))

    dateline.x_labels = lista_labels
    dateline.add("Consumo", lista_informacion_consumo)
    diagrama_renderizado = dateline.render_data_uri()
    return diagrama_renderizado
Example #4
def out_plot(scores, date_range, target_station, flag_data=None):
    line_map = [(dt.date(), sc) for dt, sc in zip(date_range, scores)]

    graph = pygal.DateLine(x_label_rotation=35, stroke=False, human_readable=True)  # disable_xml_declaration=True)
    graph.force_uri_protocol = 'http'
    graph.title = '{}: Score.'.format(target_station)
    graph.add(target_station, line_map)
    if flag_data is not None:
        flag_label = flag_data == FAULT_FLAG
        fault_line = [line_map[ix] if flag_label[ix] else (None, None)
                      for ix in range(len(flag_label))]

        graph.add('Faults', fault_line)

    graph_data = graph.render_data_uri()  # is_unicode=True)
    return graph_data
Example #5
    def dateline_plot(self, title, date_x, *args):

        # see http://www.pygal.org/en/stable/documentation/types/xy.html

        dateline = pygal.DateLine(self.chart_config, style=self.custom_style)
        dateline.title = 'Users and New Users versus time'
        dateline.x_labels = [
            date_x[0], date_x[round((len(date_x) - 1) * 0.25)], date_x[round(
                (len(date_x) - 1) * 0.5)], date_x[round(
                    (len(date_x) - 1) * 0.75)], date_x[round(
                        (len(date_x) - 1) * 1)]
        ]

        for a in args:
            dateline.add(a[0], [*a[1]])

        mychart = dateline.render()
        mychart_str = mychart.decode('utf-8')
        return mychart_str
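Note: the five x_labels above are simply the dates at 0%, 25%, 50%, 75% and 100% of the
range. An equivalent selection as a comprehension (a sketch; assumes date_x is a
non-empty, ordered sequence):

n = len(date_x) - 1
dateline.x_labels = [date_x[round(n * frac)] for frac in (0, 0.25, 0.5, 0.75, 1)]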
Example #6
def getMonthlyBooksSavedGraph(request, username):
    if request.user.is_staff:
        books_saved = BookSaver.objects.filter(user__username=username)
        dates = set()
        x = set()
        for saver in books_saved:
            dates.add(saver.date)
        for d in dates:
            x.add(datetime.date(d.year, d.month, 1))
        x = sorted(x)
        dataset = [(month,
                    books_saved.filter(date__year=month.year,
                                       date__month=month.month).count())
                   for month in x]
        dateline = pygal.DateLine(x_label_rotation=90)
        dateline.x_labels = x
        dateline.add("Books Saved for Each Month", dataset)
        return dateline.render_django_response()
    else:
        return HttpResponse("")
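Note: the example buckets save dates by month and then counts a queryset per month. A
Counter-based sketch (assuming the same books_saved queryset) produces the same
(month, count) pairs in a single pass:

from collections import Counter
import datetime

month_counts = Counter(datetime.date(d.year, d.month, 1)
                       for d in books_saved.values_list('date', flat=True))
dataset = sorted(month_counts.items())  # [(first_of_month, books_saved_that_month), ...]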
Example #7
def create_graph(currencies_codes, year, filename):
    dateline = pygal.DateLine(x_label_rotation=90)

    # Iterate over a copy of the keys so a failed currency can be removed safely.
    for key in list(currencies_codes):
        url = 'http://www.nbrb.by/API/ExRates/Rates/Dynamics/{c_code}?startDate={0}-1-1&endDate={0}-12-31'.format(
            year, c_code=currencies_codes[key])
        response = requests.get(url)
        if response.status_code == 200:
            get_currency_rait_by_year(dateline, key, response.json())
        else:
            print('Cannot read currency rate for {0}'.format(key))
            currencies_codes.pop(key, None)

    if not filename:
        filename = ""
        for key in currencies_codes:
            filename += "{0}-".format(key)
        filename += str(year)
        filename += ".svg"

    dateline.render_to_file(filename)
Example #8
    async def process(self, data):
        if data.prices is None or not data.prices["US"]["USD"]:
            return

        dataseries_x = []
        dataseries_y = []

        for entry in data.prices["US"]["USD"]:
            if dataseries_y:
                dataseries_y.append(dataseries_y[-1])
                dataseries_x.append(entry.date)
            if entry.price_final is None:
                dataseries_y.append(None)
                dataseries_x.append(entry.date)
            else:
                price_final = entry.price_final / 100
                dataseries_y.append(price_final)
                dataseries_x.append(entry.date)

        now_ts = datetime.datetime.now(datetime.timezone.utc).date()
        dataseries_y.append(dataseries_y[-1])
        dataseries_x.append(now_ts)
        dataseries = list(zip(dataseries_x, dataseries_y))
        max_price = max(filter(lambda x: x is not None, dataseries_y))
        range_top = int(math.ceil(max_price / 10) * 10)

        chart = pygal.DateLine()
        chart.width = 1000
        chart.height = 300
        chart.show_legend = False
        chart.range = (0, range_top)  # Round to nearest multiple of 10
        chart.max_scale = 6
        chart.include_x_axis = True
        chart.value_formatter = lambda p: "${:.2f}".format(p)
        chart.dots_size = 3
        chart.show_x_guides = True
        chart.js = []
        chart.css = []
        chart.add("Price", dataseries, allow_interruptions=True)

        chart_etree = chart.render_tree()

        # Replace default css with custom version
        defs = chart_etree.find("defs")
        defs.clear()
        style_el = Element("style", {"type": "text/css"})
        style_el.text = CHARTS_CSS
        defs.append(style_el)

        # Remove useless dots description elements
        for dots_el in chart_etree.iterfind(".//g[@class='dots']"):
            for desc_el in dots_el.findall("./desc"):
                if desc_el.attrib["class"] != "value":
                    dots_el.remove(desc_el)

        chart_xml = ElementTree(chart_etree)
        #chart_xml.write("figure.svg", encoding="utf-8", xml_declaration=True, pretty_print=False)
        chart_bytes = tostring(chart_xml,
                               encoding="utf-8",
                               xml_declaration=True,
                               pretty_print=False)
        chart_compressed = gzip.compress(chart_bytes)
        chart_path = self.db.path_chart(data.id)
        try:
            async with aiofiles.open(chart_path, "wb") as fobj:
                await fobj.write(chart_compressed)
        except FileNotFoundError:
            await aiofiles.os.makedirs(chart_path.parent, exist_ok=True)
            async with aiofiles.open(chart_path, "wb") as fobj:
                await fobj.write(chart_compressed)
Example #9
 def __init__(self, **kwargs):
     self.chart = pygal.DateLine(**kwargs)
     self.chart.title = "Tickets' updates"
     self.chart.x_label_rotation = 25
Example #10
#!/usr/bin/python3
# -*- coding: utf-8 -*-

# Data: dates are accurate to the day

import pygal
from datetime import date

data_chart = pygal.DateLine(x_label_rotation=25)
data_chart.x_labels =\
    [
    date(2013, 1, 1),
    date(2013, 7, 1),
    date(2014, 1, 1),
    date(2014, 7, 1),
    date(2015, 1, 1),
    date(2015, 7, 1)
    ]

data_chart.add("Serie", [(date(2013, 1, 2), 213), (date(2013, 8, 2), 281),
                         (date(2014, 12, 7), 198), (date(2015, 3, 21), 120)])

data_chart.render_to_file('./image/xy_data.svg')
Example #11
def plot_stats(item, index):
    global pth
    global daily_count
    global writing_stats
    global charts
    # Check for the stats CSV before reading it; create it if it is missing.
    if not os.path.exists(writing_stats + '_' + dirs[index] + '.csv'):
        print("File: " + writing_stats + '_' + dirs[index] + '.csv' +
              " doesn't exist, let me create it!")
        update_stats(item, index)
        words_stats(item, index)

    #---------Plot bar chart for words count for the prologue----------------
    #print("Plotted stats")
    data = pd.read_csv(writing_stats + '_' + dirs[index] + '.csv')
    print(data)
    bar_chart = pygal.Bar(style=LightStyle,
                          width=800,
                          height=600,
                          legend_at_bottom=True,
                          human_readable=True,
                          title='Prologue - words stats: ' + dirs[index])
    for ind, row in data.iterrows():
        bar_chart.add(row["date"], row["0_file"])

    bar_chart.render_to_file(charts + '0_file' + '_' + dirs[index] + '.svg')
    print("Plotting from: " + dirs[index])
    #---------Plot bar chart for words count for the whole project-----------
    bar_chart = pygal.Bar(style=LightStyle,
                          width=800,
                          height=600,
                          legend_at_bottom=True,
                          human_readable=True,
                          title='All - words stats: ' + dirs[index])
    for ind, row in data.iterrows():
        bar_chart.add(row["date"], row["all"])

    bar_chart.render_to_file(charts + 'all' + '_' + dirs[index] + '.svg')

    #---------Plot line chart for words count for the whole project---------
    dateline = pygal.DateLine(x_label_rotation=25, min_scale=20)
    words = []
    goal = [(parser.parse('2016-09-05 00:00:00'), 1),
            (parser.parse('2016-09-22 12:00:00'), 10000),
            (parser.parse('2016-10-09 12:00:00'), 20000),
            (parser.parse('2016-10-26 12:00:00'), 30000),
            (parser.parse('2016-11-12 12:00:00'), 40000),
            (parser.parse('2016-11-30 12:00:00'), 50000)]
    milestone1k = [(parser.parse('2016-09-04 00:00:00'), 10000),
                   (parser.parse('2016-11-30 23:00:00'), 10000)]
    milestone2k = [(parser.parse('2016-09-04 00:00:00'), 20000),
                   (parser.parse('2016-11-30 23:00:00'), 20000)]
    milestone3k = [(parser.parse('2016-09-04 00:00:00'), 30000),
                   (parser.parse('2016-11-30 23:00:00'), 30000)]
    milestone4k = [(parser.parse('2016-09-04 00:00:00'), 40000),
                   (parser.parse('2016-11-30 23:00:00'), 40000)]
    milestone5k = [(parser.parse('2016-09-04 00:00:00'), 50000),
                   (parser.parse('2016-11-30 23:00:00'), 50000)]
    for ind, row in data.iterrows():
        words.append((parser.parse(row["date"]), int(row["all"])))
    dateline.title = 'Words count for the whole project: ' + dirs[index]
    dateline.add('Words', words)
    dateline.add('Goal', goal)
    dateline.add('1k', milestone1k)
    dateline.add('2k', milestone2k)
    dateline.add('3k', milestone3k)
    dateline.add('4k', milestone4k)
    dateline.add('5k', milestone5k)
    dateline.render_to_file(charts + 'line' + '_' + dirs[index] + '.svg')

    #---------Plot line chart for words count for the session---------------
    difference = pygal.DateLine(x_label_rotation=25)
    words = []
    previous = data["all"][0]
    for ind, row in data.iterrows():
        if ind != 0:
            words.append(
                (parser.parse(row["date"]), int(row["all"]) - int(previous)))
        previous = row["all"]
    difference.title = 'Daily words count: ' + dirs[index]
    difference.add('Words', words)
    difference.render_to_file(charts + 'diff' + '_' + dirs[index] + '.svg')

    #---------Plot bar chart for words count for the session---------------
    bar_chart = pygal.Bar(width=800,
                          height=600,
                          title='All - words stats: ' + dirs[index])
    bar_chart.x_labels = []
    tmp = []
    chapts = []
    for i in range(len(files_list[index])):
        chapts.append([])
#    chapts = [[],[],[],[],[],[],[],[],[],[],[]]
    previous = data["all"][0]

    prev = []
    for i in range(len(chapts)):
        prev.append(data[str(i) + "_file"][0])

    for ind, row in data.iterrows():
        if ind != 0:
            for i in range(len(chapts)):
                chapts[i].append(int(row[str(i) + "_file"]) - int(prev[i]))
            tmp.append(int(row["all"]) - int(previous))
#            bar_chart.add( int(row["all"]) - int(previous) )
        previous = row["all"]
        for i in range(len(prev)):
            prev[i] = row[str(i) + "_file"]
        bar_chart.x_labels.append(parser.parse(row["date"]).date().day)

    i = 0
    for chap in chapts:
        bar_chart.add(str(i), chap)
        i = i + 1
    #bar_chart.add('words',tmp)
    bar_chart.render_to_file(charts + 'diff_bar_files' + '_' + dirs[index] +
                             '.svg')

    #---------Plot bar chart for words count for the session---------------
    bar_chart = pygal.Bar(width=800,
                          height=600,
                          title='All - words stats: ' + dirs[index])
    bar_chart.x_labels = []
    tmp = []
    previous = data["all"][0]

    for ind, row in data.iterrows():
        if ind != 0:

            tmp.append(int(row["all"]) - int(previous))


#            bar_chart.add( int(row["all"]) - int(previous) )
        previous = row["all"]
        bar_chart.x_labels.append(parser.parse(row["date"]).date().day)

    bar_chart.add('words', tmp)
    bar_chart.render_to_file(charts + 'diff_bar' + '_' + dirs[index] + '.svg')
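Note: the manual "previous" bookkeeping used for the daily-difference charts above can
also be written with pandas diff(); a sketch assuming the same CSV columns "date" and
"all":

daily_words = data["all"].diff().dropna().astype(int)   # per-row word deltas
dates = data["date"].iloc[1:].map(parser.parse)         # matching dates (first row skipped)
words = list(zip(dates, daily_words))                   # same pairs as the manual loop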
Example #12
 def __init__(self):
     self.chart = pygal.DateLine(legend_at_bottom=True,
                                 legend_at_bottom_columns=2)
Example #13
 def __init__(self):
     self.chart = pygal.DateLine(secondary_range=(0, 100),
                                 legend_at_bottom=True,
                                 legend_at_bottom_columns=4)
Example #14
def visualization(request):
    if request.method == 'POST':
        # print(request.POST)  # The library names should appear in the request dictionary
        # Check which libraries are selected, then display them. If the form was
        # submitted empty, display nothing.

        # Do this for all libraries
        compare_libraries = []
        #testing_libraries
        if 'junit4' in request.POST:
            compare_libraries.append(lib_info['junit4'])
        if 'testng' in request.POST:
            compare_libraries.append(lib_info['testng'])

        #logging_libraries
        if 'slf4j' in request.POST:
            compare_libraries.append(lib_info['slf4j'])
        if 'log4j2' in request.POST:
            compare_libraries.append(lib_info['log4j2'])
        if 'logback' in request.POST:
            compare_libraries.append(lib_info['logback'])
        if 'commons logging' in request.POST:
            compare_libraries.append(lib_info['commons logging'])
        if 'tinylog' in request.POST:
            compare_libraries.append(lib_info['tinylog'])
        if 'blitz4j' in request.POST:
            compare_libraries.append(lib_info['blitz4j'])
        if 'minlog' in request.POST:
            compare_libraries.append(lib_info['minlog'])

        #utilities_libraries
        if 'guava' in request.POST:
            compare_libraries.append(lib_info['guava'])
        if 'commons lang' in request.POST:
            compare_libraries.append(lib_info['commons lang'])

        #mocking_libraries
        if 'mockito' in request.POST:
            compare_libraries.append(lib_info['mockito'])
        if 'easymock' in request.POST:
            compare_libraries.append(lib_info['easymock'])
        if 'powermock' in request.POST:
            compare_libraries.append(lib_info['powermock'])
        if 'jmock' in request.POST:
            compare_libraries.append(lib_info['jmock'])

        #cryptography_libraries
        if 'bouncycastle' in request.POST:
            compare_libraries.append(lib_info['bouncycastle'])
        if 'commons crypto' in request.POST:
            compare_libraries.append(lib_info['commons crypto'])
        if 'conceal' in request.POST:
            compare_libraries.append(lib_info['conceal'])
        if 'chimera' in request.POST:
            compare_libraries.append(lib_info['chimera'])
        if 'spongycastle' in request.POST:
            compare_libraries.append(lib_info['spongycastle'])
        if 'keyczar' in request.POST:
            compare_libraries.append(lib_info['keyczar'])
        if 'conscrypt' in request.POST:
            compare_libraries.append(lib_info['conscrypt'])

        #json_libraries
        if 'gson' in request.POST:
            compare_libraries.append(lib_info['gson'])
        if 'json.simple' in request.POST:
            compare_libraries.append(lib_info['json.simple'])

        #databases_libraries
        if 'h2' in request.POST:
            compare_libraries.append(lib_info['h2'])
        if 'derby' in request.POST:
            compare_libraries.append(lib_info['derby'])

        #security_libraries
        if 'shiro' in request.POST:
            compare_libraries.append(lib_info['shiro'])
        if 'spring security' in request.POST:
            compare_libraries.append(lib_info['spring security'])

        #ormapping_libraries
        if 'hibernate orm' in request.POST:
            compare_libraries.append(lib_info['hibernate orm'])
        if 'mybatis3' in request.POST:
            compare_libraries.append(lib_info['mybatis3'])
        if 'ormlite' in request.POST:
            compare_libraries.append(lib_info['ormlite'])

        #xml_libraries
        if 'xerces2-j' in request.POST:
            compare_libraries.append(lib_info['xerces2-j'])
        if 'dom4j' in request.POST:
            compare_libraries.append(lib_info['dom4j'])
        if 'jdom' in request.POST:
            compare_libraries.append(lib_info['jdom'])

        # List of all the different visualization names
        visualizations = [["Popularity Count"], ["Release Frequency"], ["Last Modified Date"], \
        ["Backwards Compatibility"], ["Stack Overflow"], ["Security & Performance"], ["Issue Data Response Time"], ["Issue Data Resolved Time"]]

        # set up library arrays
        library_names = []
        library_popularity = []
        library_releasedates = []
        library_lastModifiedDate = []
        library_breakingChanges = []
        library_QA_SO = []
        library_lastDiscussedSO = []
        library_Secruity_Performance = []
        library_responsetime = []
        library_resolvedtime = []
        libCount = len(compare_libraries)

        for library in compare_libraries:
            library_names.append(library['Name'])
            library_popularity.append(library['Popularity_Count'])
            releaseDates = []
            for releaseDate in library['Release_Dates']:
                datefilter = datetime.today() - timedelta(days=365)
                if datetime.strptime(releaseDate, "%Y-%m-%d") > datefilter:
                    releaseDates.append(
                        datetime.strptime(releaseDate, "%Y-%m-%d"))
            library_releasedates.append(releaseDates)
            library_lastModifiedDate.append(
                datetime.strptime(library['Last_Modification_Date'],
                                  "%Y-%m-%d"))
            library_breakingChanges.append(library['#_Breaking_Changes'])
            library_QA_SO.append(library['#_Questions_Asked_SO'])
            try:
                library_lastDiscussedSO.append(
                    datetime.strptime(library['Last_Discussed_SO'],
                                      "%Y-%m-%d"))
            except:  # No date
                library_lastDiscussedSO.append(None)

            init_count = [0, 0, 0, 0]  # Performance - Security - Both - Neither
            for issue_ID in library['Issue_Data']:
                if library['Issue_Data'][issue_ID][
                        'Performance_Issue'] == 'Yes':
                    if library['Issue_Data'][issue_ID][
                            'Security_Issue'] == 'Yes':
                        init_count[2] += 1
                    else:
                        init_count[0] += 1
                else:
                    if library['Issue_Data'][issue_ID][
                            'Security_Issue'] == 'Yes':
                        init_count[1] += 1
                    else:
                        init_count[3] += 1
            library_Secruity_Performance.append(init_count)

            timecategories = [0, 0, 0, 0,
                              0]  #[<day, <week, <month, >month, still pending]
            for issue_id in library['Issue_Data']:
                skipflag = False
                creationdate = datetime.strptime(
                    library['Issue_Data'][issue_id]['Issue_Creation_Date'],
                    "%Y-%m-%d %H:%M:%S")
                try:
                    closedate = datetime.strptime(
                        library['Issue_Data'][issue_id]['Issue_Close_Date'],
                        "%Y-%m-%d %H:%M:%S")
                except:  #date is none meaning issue is pending
                    timecategories[4] += 1
                    #print('pending')
                    skipflag = True

                if skipflag == False:
                    #hours difference https://stackoverflow.com/questions/5612129/converting-date-into-hours
                    resolvetime = (closedate -
                                   creationdate).total_seconds() / 3600.0
                    #print(resolvetime, creationdate, closedate, issue_id)

                    if resolvetime < 24:  # less than one day
                        timecategories[0] += 1
                    elif resolvetime < 168:  #less than one week
                        timecategories[1] += 1
                    elif resolvetime < 720:  #less than one month (30 days)
                        timecategories[2] += 1
                    elif resolvetime >= 720:  #greater than or equal to a month (30 days)
                        timecategories[3] += 1
            #print(timecategories)
            library_resolvedtime.append(timecategories)

            timecategories1 = [0, 0, 0, 0,
                               0]  #[<day, <week, <month, >month, no response]
            for issue_id in library['Issue_Data']:
                skipflag1 = False
                creationdate = datetime.strptime(
                    library['Issue_Data'][issue_id]['Issue_Creation_Date'],
                    "%Y-%m-%d %H:%M:%S")
                try:
                    responsedate = datetime.strptime(
                        library['Issue_Data'][issue_id]
                        ['Date_of_First_Comment'], "%Y-%m-%d %H:%M:%S")
                except:  #date is none meaning no comment
                    timecategories1[4] += 1
                    #print('pending')
                    skipflag1 = True

                if skipflag1 == False:
                    #hours difference https://stackoverflow.com/questions/5612129/converting-date-into-hours
                    resolvetime = (responsedate -
                                   creationdate).total_seconds() / 3600.0
                    #print(resolvetime, creationdate, closedate, issue_id)

                    if resolvetime < 24:  # less than one day
                        timecategories1[0] += 1
                    elif resolvetime < 168:  #less than one week
                        timecategories1[1] += 1
                    elif resolvetime < 720:  #less than one month (30 days)
                        timecategories1[2] += 1
                    elif resolvetime >= 720:  #greater than or equal to a month (30 days)
                        timecategories1[3] += 1
            #print(timecategories)
            library_responsetime.append(timecategories1)

    #--------- CUSTOM STYLE, USE THIS -----------------#
        custom_style = Style(label_font_size=25,
                             major_label_font_size=25,
                             value_font_size=25,
                             value_label_font_size=25,
                             title_font_size=25,
                             legend_font_size=25,
                             tooltip_font_size=15)
        log_style = Style(label_font_size=12,
                          major_label_font_size=12,
                          value_font_size=25,
                          value_label_font_size=25,
                          title_font_size=25,
                          legend_font_size=25,
                          tooltip_font_size=15)

        # ----- Popularity Count Graph
        bar_chart = pygal.Bar(dynamic_print_values=True,
                              style=custom_style,
                              legend_at_bottom=True)
        bar_chart.title = 'Repository Popularity Count'
        #bar_chart.x_labels = library_names
        for libraries in range(len(library_popularity)):
            bar_chart.add(library_names[libraries],
                          library_popularity[libraries])
        visualizations[0].append(bar_chart.render_data_uri())
        # with help from http://pygal.org/en/stable/documentation/output.html

        #ATTEMPT TO MAKE A FILTER
        #today = date.today()
        #https://stackoverflow.com/questions/993358/creating-a-range-of-dates-in-python
        #date_list = [today - timedelta(days=x) for x in range(0, 365)]
        #dateline.x_labels = date_list
        # ----- Release Frequency Graph
        dateline = pygal.DateLine(show_y_labels=False,
                                  x_label_rotation=25,
                                  style=custom_style,
                                  height=500,
                                  legend_at_bottom=True,
                                  show_x_guides=True,
                                  range=(0.9 - (libCount * 0.1), 1.0))
        dateline.title = "Repository Release Frequency"
        maxYAxis = 0.9
        for libraryIndex in range(len(library_releasedates)):
            releaseDatesTuple = []
            for releaseDatesIndex in range(
                    len(library_releasedates[libraryIndex])):
                releaseDatesTuple.append(
                    (library_releasedates[libraryIndex][releaseDatesIndex],
                     maxYAxis))
            maxYAxis -= 0.1
            dateline.add(library_names[libraryIndex],
                         releaseDatesTuple,
                         dots_size=15)
        visualizations[1].append(dateline.render_data_uri())

        # ----- Last Modified Date
        # Make x_labels, one month before min and one month after max
        firstDisplayedDate = getMonthDelta(min(library_lastModifiedDate),
                                           -1)  # Get a month before min date
        lastDisplayedDate = getMonthDelta(max(library_lastModifiedDate),
                                          1)  # Get a month after
        allMonths = monthsInBetween(firstDisplayedDate, lastDisplayedDate)
        dateline = pygal.DateLine(dynamic_print_values=True,
                                  x_label_rotation=20,
                                  show_y_labels=False,
                                  legend_at_bottom=True,
                                  style=custom_style,
                                  height=500,
                                  show_x_guides=True,
                                  range=(0.9 - (libCount * 0.1), 1.0))
        # ALSO DON'T NEED - just pollutes the x-axis
        # dateline.x_labels = allMonths
        dateline.title = 'Repository Last Modified Date'
        maxYAxis = 0.9
        for index in range(len(library_lastModifiedDate)):
            dateline.add(library_names[index],
                         [(library_lastModifiedDate[index], maxYAxis)],
                         dots_size=25)
            maxYAxis -= 0.1
        visualizations[2].append(dateline.render_data_uri())
        # Store components - visualizations[2] is Last Modified Date

        # ----- Backwards Compatibility
        # make the x-axis labels pretty
        # allDates = []
        # for library_datelist in library_releasedates:
        #     allDates += library_datelist

        bar_chart = pygal.DateLine(style=log_style,
                                   x_label_rotation=25,
                                   legend_at_bottom=True,
                                   show_x_guides=True,
                                   logarithmic=True)
        bar_chart.title = 'Number of Breaking Changes in Each Release'
        # DON'T NEED - bar_chart.x_labels = map(lambda d: d.strftime('%Y-%m-%d'), allDates)
        for libraryIndex in range(len(library_breakingChanges)):
            release_breakingChange_tuples = []
            #print(str(len(library_releasedates[libraryIndex])) + " - " + str(len(library_breakingChanges[libraryIndex])))
            for dateIndex in range(
                    min(len(library_releasedates[libraryIndex]),
                        len(library_breakingChanges[libraryIndex]))):
                # Breaking changes and release dates aren't exactly the same length;
                # using min() keeps the indices in range.
                release_breakingChange_tuples.append(
                    (library_releasedates[libraryIndex][dateIndex],
                     library_breakingChanges[libraryIndex][dateIndex]))
            bar_chart.add(library_names[libraryIndex],
                          release_breakingChange_tuples,
                          dots_size=5)

        visualizations[3].append(bar_chart.render_data_uri())
        # Store components - visualizations[3] is Backwards Compatibility

        # ----- Stack Overflow
        bar_chart = pygal.Bar(dynamic_print_values=True, style=custom_style)
        bar_chart.title = 'Number of Questions Asked on Stack Overflow'
        #bar_chart.x_labels = library_names
        for libraries in range(len(library_QA_SO)):
            bar_chart.add(library_names[libraries], library_QA_SO[libraries])

        visualizations[4].append(bar_chart.render_data_uri())
        # Store components - visualizations[4] is Stack Overflow

        # Second chart
        dateline = pygal.DateLine(dynamic_print_values=True,
                                  x_label_rotation=20,
                                  show_y_labels=False,
                                  legend_at_bottom=True,
                                  style=custom_style,
                                  height=400,
                                  show_x_guides=True,
                                  range=(0.9 - (libCount * 0.1), 1.0))
        dateline.title = 'Last Discussed on Stack Overflow'

        notDiscussed = []
        maxYAxis = 0.9
        for index in range(len(library_lastDiscussedSO)):
            dateline.add(library_names[index],
                         [(library_lastDiscussedSO[index], maxYAxis)],
                         dots_size=25)
            if library_lastDiscussedSO[index] == None:
                notDiscussed.append(library_names[index])
            maxYAxis -= 0.1

        visualizations[4].append(dateline.render_data_uri())
        # Store components - visualizations[4] is Stack Overflow

        # ----- Security & Performance
        labels = ['Performance', 'Security', 'Both', 'Neither']
        tran_data = [[], [], [], []]
        bar_chart = pygal.StackedBar(dynamic_print_values=True,
                                     style=custom_style,
                                     legend_at_bottom=True)
        bar_chart.title = 'Security and Performance Percentage'
        bar_chart.x_labels = library_names

        for libraries in range(len(library_Secruity_Performance)):
            for label_num in range(len(labels)):
                tran_data[label_num].append(
                    library_Secruity_Performance[libraries][label_num])
        for label in range(len(labels)):
            bar_chart.add(labels[label], tran_data[label])

        # Store components - visualizations[5] is Security & Performance
        visualizations[5].append(bar_chart.render_data_uri())

        # ----- Issue Data Response Time

        labels = ['<Day', '<Week', '<Month', '>Month', 'No response']
        bar_chart = pygal.Bar(dynamic_print_values=True, legend_at_bottom=True)
        bar_chart.title = 'Issue Data Response Time'
        bar_chart.x_labels = labels
        for libraries in range(len(library_responsetime)):
            bar_chart.add(library_names[libraries],
                          library_responsetime[libraries])
        visualizations[6].append(bar_chart.render_data_uri())

        # Store components - visualizations[6] is Issue Data Response Time

        # ----- Issue Data Resolved Time
        labels = ['<Day', '<Week', '<Month', '>Month', 'Pending']
        bar_chart = pygal.Bar(dynamic_print_values=True, legend_at_bottom=True)
        bar_chart.title = 'Issue Data Resolved Time'
        bar_chart.x_labels = labels
        for libraries in range(len(library_resolvedtime)):
            bar_chart.add(library_names[libraries],
                          library_resolvedtime[libraries])
        visualizations[7].append(bar_chart.render_data_uri())

        # Store components - visualizations[7] is Issue Data Resolved Time

        #Feed them to the Django template.
        return render(request, 'visualization_app/visualizations.html', {
            'visualizations': visualizations,
            'notDiscussed': notDiscussed,
        })

    else:
        return redirect('/')  # Go back to main screen
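Note: the if-chain at the top of this view maps each POST key to one lib_info entry. A
compact sketch of the same selection (the key list below is copied from the checks
above):

LIBRARY_KEYS = [
    'junit4', 'testng', 'slf4j', 'log4j2', 'logback', 'commons logging',
    'tinylog', 'blitz4j', 'minlog', 'guava', 'commons lang', 'mockito',
    'easymock', 'powermock', 'jmock', 'bouncycastle', 'commons crypto',
    'conceal', 'chimera', 'spongycastle', 'keyczar', 'conscrypt', 'gson',
    'json.simple', 'h2', 'derby', 'shiro', 'spring security',
    'hibernate orm', 'mybatis3', 'ormlite', 'xerces2-j', 'dom4j', 'jdom',
]
compare_libraries = [lib_info[key] for key in LIBRARY_KEYS if key in request.POST]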
Example #15
def index():
    try:
        # If path is called by an entry post:
        if request.method == "POST":
            # Save all input data as variables.
            input_date = request.form.get("date")
            input_sex = request.form.get("sex")
            input_age = int(request.form.get("age"))
            input_weight = float(request.form.get("weight"))
            input_pec = float(request.form.get("pectoralis"))
            input_tri = float(request.form.get("triceps"))
            input_sub = float(request.form.get("subscapular"))
            input_axi = float(request.form.get("axillary"))
            input_sup = float(request.form.get("suprailliac"))
            input_abd = float(request.form.get("abdomen"))
            input_thi = float(request.form.get("thigh"))
            # Calculate sum of input skinfolds.
            input_sf_sum = input_pec + input_tri + input_sub + input_axi +\
            input_sup + input_abd + input_thi
            # Calculate and save bodyfat percentage dependent on sex.
            if input_sex == "M":
                bf_calc = bodyfat_M(input_sf_sum, input_age)
            else:
                bf_calc = bodyfat_F(input_sf_sum)
            # Calculate and save fat and fat free mass.
            fat_mass = input_weight * (bf_calc / 100)
            fat_free_mass = input_weight - fat_mass

            # Add all variables to the local "database"
            calculations.append([input_date, input_sex, input_age, input_weight,\
            input_pec, input_tri, input_sub, input_axi, input_sup, input_abd,\
            input_thi, round(input_sf_sum, 1), round(bf_calc, 1),\
            round(fat_free_mass, 1), round(fat_mass, 1)])
            date_data.append(input_date.split("-"))
            weight_data.append(input_weight)
            pec_data.append(input_pec)
            tri_data.append(input_tri)
            sub_data.append(input_sub)
            axi_data.append(input_axi)
            sup_data.append(input_sup)
            abd_data.append(input_abd)
            thi_data.append(input_thi)
            bf_data.append(round(bf_calc, 1))
            ffm_data.append(round(fat_free_mass, 1))
            fm_data.append(round(fat_mass, 1))

#-----CHART 1 (bodyfat and skinfold measurements over time)-----#
        sites_date_chart = pygal.DateLine(x_label_rotation=20)
        # Data for the bodyfat line. List of tuples in the format (date, bodyfat)
        sites_date_chart.add(
            "Bodyfat (%)", [(date(int(j[0]), int(j[1]), int(j[2])), bf_data[i])
                            for i, j in enumerate(date_data)])
        # The same for the all the sites
        sites_date_chart.add(
            "Pectoralis (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), pec_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Triceps (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), tri_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Subscapular (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), sub_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Axillary (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), axi_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Suprailliac (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), sup_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Abdomen (mm)",
            [(date(int(j[0]), int(j[1]), int(j[2])), abd_data[i])
             for i, j in enumerate(date_data)])
        sites_date_chart.add(
            "Thigh (mm)", [(date(int(j[0]), int(j[1]), int(j[2])), thi_data[i])
                           for i, j in enumerate(date_data)])
        # Render chart.
        sites_date_chart_data = sites_date_chart.render_data_uri()

        #-----CHART 2 (weight, fat free mass and fat mass over time)-----#
        mass_date_chart = pygal.DateLine(x_label_rotation=20)
        # Adds data to chart as above.
        mass_date_chart.add(
            "Weight (kg)",
            [(date(int(j[0]), int(j[1]), int(j[2])), weight_data[i])
             for i, j in enumerate(date_data)])
        mass_date_chart.add(
            "Fat free mass (kg)",
            [(date(int(j[0]), int(j[1]), int(j[2])), ffm_data[i])
             for i, j in enumerate(date_data)])
        mass_date_chart.add(
            "Fat mass (kg)",
            [(date(int(j[0]), int(j[1]), int(j[2])), fm_data[i])
             for i, j in enumerate(date_data)])
        # Render chart.
        mass_date_chart_data = mass_date_chart.render_data_uri()

    # If any error is encountered, go to oops.html.
    except:
        return render_template("oops.html")

    # Go to index.html and bring listed data.
    return render_template("index.html",
                           calculations=calculations,
                           sites_date_chart_data=sites_date_chart_data,
                           mass_date_chart_data=mass_date_chart_data)
Example #16
async def budgetLineChart(project, df):
    cstyle = Style(major_label_font_size=22,
                   label_font_size=22,
                   legend_font_size=23,
                   value_font_size=22,
                   title_font_size=22,
                   colors=('#DE8F6E', '#00AABB', '#0000FF'),
                   font_family='googlefont: Helvetica')
    dateline_chart = pygal.DateLine(width=650,
                                    height=600,
                                    legend_at_bottom=True,
                                    legend_at_bottom_columns=3,
                                    truncate_label=-1,
                                    x_labels_major_count=3,
                                    show_minor_x_labels=False,
                                    x_label_rotation=0,
                                    margin_right=50,
                                    x_value_formatter=date_formatter,
                                    value_formatter=HM.ToKMB,
                                    no_data_text='Not Available',
                                    js=[],
                                    style=cstyle)

    planned = []
    forecast = []
    actual = []
    budgetRows = df.to_dict('records')
    today = dt.datetime.now().date()
    forecastFlag = False
    budgetDateDate = []
    i = 0
    for budgetRow in budgetRows:
        if not forecastFlag:
            if (budgetRow['budgetdate'] < today):
                forecastFlag = True

        budgetDateDate.append(budgetRow['budgetdate'])
        if (i == 0):
            actual.append((budgetRow['budgetdate'], budgetRow['actual']))
            planned.append((budgetRow['budgetdate'], budgetRow['planned']))
            forecast.append((budgetRow['budgetdate'], budgetRow['actual']))
        else:
            actual.append((budgetRow['budgetdate'],
                           budgetRow['actual'] + actual[i - 1][1]))
            planned.append((budgetRow['budgetdate'],
                            budgetRow['planned'] + planned[i - 1][1]))

            if (budgetRow['budgetdate'] < today):
                forecast.append((budgetRow['budgetdate'],
                                 budgetRow['actual'] + forecast[i - 1][1]))
            else:
                forecast.append((budgetRow['budgetdate'],
                                 budgetRow['forecast'] + forecast[i - 1][1]))
        i += 1

    dateline_chart.x_labels = budgetDateDate
    if (forecastFlag):
        dateline_chart.add(title='Forecast', values=forecast)
        # dateline_chart.add('Forecast',forecast)
    dateline_chart.add(title='Actual', values=actual)
    dateline_chart.add(title='Planned', values=planned)
    # dateline_chart.add('Actual',actual)
    # dateline_chart.add('Planned',planned)

    return dateline_chart.render_data_uri()
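Note: the planned and actual series above are running totals built by hand. The same
cumulative planned line could be built with itertools.accumulate (a sketch assuming the
same budgetRows list of dicts):

from itertools import accumulate

dates = [row['budgetdate'] for row in budgetRows]
planned = list(zip(dates, accumulate(row['planned'] for row in budgetRows)))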