Example #1
File: views.py  Project: ekeijl/BeAT
def graph_model(request, id, format="png"):
    """
	Output a graph for model comparison.
	Each seperate Model has one line; the data for this line is determined by Benchmarks that are filtered from the db.
	@param id ModelComparison ID from the database, used to filter the Benchmark data from the db.
	@param format The export format for the graph. Choices: ['png','pdf','ps','eps','svg']
	"""
    # General library stuff
    from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
    from matplotlib.figure import Figure

    # DB stuff
    from django.db.models import Count

    # Fetch the ModelComparison from the db; its fields drive the Benchmark filters below
    comparison = ModelComparison.objects.get(pk=id)
    c_tool = comparison.tool
    c_algo = comparison.algorithm
    c_type = comparison.type
    c_option = comparison.optionvalue

    fig = Figure(facecolor="w")
    ax = fig.add_subplot(111)

    # Lists of colors, styles and markers to get a nice unique style for each line
    colors = ("b", "g", "r", "c", "m", "y", "k")
    styles = ["-", "--", ":"]
    markers = ["+", "o", "x"]

    # Plot data
    axisNum = 0  # Counts the number of lines (to produce a unique style for each line)
    modelNames = Model.objects.values("name").annotate(num_models=Count("name"))

    # Plot a line for each model
    for m in modelNames:
        axisNum += 1
        style = styles[axisNum % len(styles)]
        color = colors[axisNum % len(colors)]
        marker = markers[axisNum % len(markers)]

        benchmarks = Benchmark.objects.filter(model__name__exact=m["name"])
        # Filter benchmarks based on the ModelComparison data
        benchmarks = benchmarks.filter(algorithm_tool__algorithm=c_algo, algorithm_tool__tool=c_tool).order_by(
            "algorithm_tool__date"
        )
        benchmarks = benchFind(benchmarks, [o.id for o in c_option.all()])

        if len(benchmarks) != 0:

            # Select the data series to plot, based on the comparison type
            types = []
            if c_type == ModelComparison.TRANSITIONS:
                types = [b.transition_count for b in benchmarks]
            elif c_type == ModelComparison.STATES:
                types = [b.states_count for b in benchmarks]
            elif c_type == ModelComparison.VSIZE:
                types = [b.memory_VSIZE for b in benchmarks]
            elif c_type == ModelComparison.RSS:
                types = [b.memory_RSS for b in benchmarks]
            elif c_type == ModelComparison.ELAPSED_TIME:
                types = [b.elapsed_time for b in benchmarks]
            elif c_type == ModelComparison.TOTAL_TIME:
                types = [b.total_time for b in benchmarks]

            # Plot one line of (date, value) points for this model
            ax.plot([b.algorithm_tool.date for b in benchmarks], types, marker + style + color, label=m["name"])

    # Mark-up: build the graph title from tool, algorithm and selected options
    title = c_tool.name + " " + c_algo.name
    if c_option.all():
        options = [str(o) for o in c_option.all()]
        title = title + " [" + ",".join(options) + "]"
    ax.set_title(title)

    # Print legend for lines in the graph.
    leg = ax.legend(fancybox=True, loc="upper left", bbox_to_anchor=(1, 1.15), markerscale=5)
    if leg:
        for t in leg.get_texts():
            t.set_fontsize("xx-small")

    # Print labels for the axes; look up the display name for c_type in DATA_TYPES.
    y_label = c_type
    for key, label in ModelComparison.DATA_TYPES:
        if key == c_type:
            y_label = label
    ax.set_ylabel(y_label)
    ax.set_xlabel("Revision date")
    fig.autofmt_xdate()

    fig.subplots_adjust(right=0.7)

    # Output
    canvas = FigureCanvas(fig)
    response = graph.export(canvas, comparison.name, format)
    return response
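
Note: benchFind is a BeAT project helper that is not included in this listing. The call sites above pass it a Benchmark queryset plus a list of OptionValue ids and iterate over the result. Below is a minimal sketch of that implied behaviour, assuming Benchmark has an optionvalue many-to-many field (an assumption; the real helper's matching rule may differ):

def benchFind(benchmarks, optionvalue_ids):
    # Hypothetical sketch, not the project's actual code.
    # Keep only benchmarks whose OptionValue ids exactly match the selection.
    wanted = set(optionvalue_ids)
    return [b for b in benchmarks
            if set(o.id for o in b.optionvalue.all()) == wanted]  # b.optionvalue is assumed
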
Example #2
File: views.py  Project: ekeijl/BeAT
def scatterplot(request, id, format="png"):
    """
	Produces a scatterplot from a set of benchmarks.
	Filters two sets of Benchmark objects based on the Comparison object and takes total_time and memory_VSIZE values from them.
	@param id The identifier of the Comparison object.
	@param format The export format for the graph, png is default. Choices: ['png','pdf','ps','eps','svg']
	"""

    # Fetch the Comparison that defines the two benchmark sets
    c = get_object_or_404(Comparison, id=id)

    # Fetch the AlgorithmTools
    at_a = c.algorithm_tool_a
    at_b = c.algorithm_tool_b

    # Fetch the OptionValues - note: use ov_a.all() to get the set of OptionValue objects!
    ov_a = c.optionvalue_a
    ov_b = c.optionvalue_b

    # First filter AlgorithmTool
    b1 = Benchmark.objects.filter(algorithm_tool=at_a)
    b2 = Benchmark.objects.filter(algorithm_tool=at_b)

    # Only keep Benchmarks whose Model appears in both sets.
    b1 = b1.filter(model__in=[b.model.pk for b in b2])
    b2 = b2.filter(model__in=[b.model.pk for b in b1])

    # Filter the selected options from Benchmark sets.
    b1 = benchFind(b1, [o.id for o in ov_a.all()])
    b2 = benchFind(b2, [o.id for o in ov_b.all()])

    # Average the values for Models that occur more than once in a set.
    t1avg, m1avg = averageModels(b1)
    t2avg, m2avg = averageModels(b2)

    xName = ""
    yName = ""
    title = ""

    if at_a == at_b:
        title = str(at_a)
        xName = printOptions(ov_a)
        yName = printOptions(ov_b)
    else:
        xName = printLabel(at_a, ov_a)
        yName = printLabel(at_b, ov_b)

    if format == "multi-pdf":
        response = HttpResponse(content_type="application/pdf")
        pdf = PdfPages(response)

        fig = graph.makeScatter([(t1avg, t2avg)], ["Runtime (s)"], title, xName, yName)
        canvas = FigureCanvas(fig)
        pdf.savefig(figure=canvas.figure)

        fig2 = graph.makeScatter([(m1avg, m2avg)], ["Memory VSIZE (kb)"], title, xName, yName)
        canvas = FigureCanvas(fig2)
        pdf.savefig(figure=canvas.figure)

        d = pdf.infodict()
        d["Title"] = title or "BeAT Comparison"
        d["Author"] = str(request.user)
        d["Subject"] = "How to create a multipage pdf file and set its metadata"
        d["Keywords"] = "BeAT"
        d["CreationDate"] = datetime.datetime.now()
        d["ModDate"] = datetime.datetime.today()

        pdf.close()
        response["Content-Disposition"] = "attachment; filename=multipage_pdf.pdf"
    else:
        fig = graph.makeScatter(
            [(t1avg, t2avg), (m1avg, m2avg)], ["Runtime (s)", "Memory VSIZE (kb)"], title, xName, yName
        )
        canvas = FigureCanvas(fig)
        response = graph.export(canvas, c.name, format)
    return response
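
Note: averageModels is another project helper not shown here; Example #3 below inlines the same averaging step. A sketch reconstructed from that inline code: group each set's Benchmarks by Model, then average total_time and memory_VSIZE per group. For the scatter pairing in makeScatter to line up, both calls must visit the Models in the same order, which this sketch alone does not guarantee:

def averageModels(benchmarks):
    # Sketch based on the inline averaging loop in Example #3; the real helper may differ.
    groups = {}
    for b in benchmarks:
        groups.setdefault(b.model.pk, []).append(b)
    t_avg, m_avg = [], []
    for group in groups.values():
        t_avg.append(sum(float(b.total_time) for b in group) / len(group))
        m_avg.append(sum(float(b.memory_VSIZE) for b in group) / len(group))
    return t_avg, m_avg
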
Example #3
File: views.py  Project: ties/BeAT
def scatterplot(request, id, format='png'):
	"""
	Produces a scatterplot from a set of benchmarks.
	Filters two sets of Benchmark objects based on the Comparison object and takes total_time and memory_VSIZE values from them.
	@param id The identifier of the Comparison object.
	@param format The export format for the graph, png is default. Choices: ['png','pdf','ps','eps','svg']
	""" 
	
	# General library stuff
	from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
	from matplotlib.figure import Figure
	
	# Colorconverter to make red and blue dots in the plot
	from matplotlib.colors import ColorConverter
	cc=ColorConverter()

	import math
	fig=Figure(facecolor='w')
	
	# Make a subplot for the Total Time data of a benchmark set
	ax=fig.add_subplot(211)
	
	# Fetch two benchmarks sets from DB
	c = get_object_or_404(Comparison,id=id)
	at_a = c.algorithm_tool_a
	at_b = c.algorithm_tool_b
	
	# First filter AlgorithmTool
	b1 = Benchmark.objects.filter(algorithm_tool=at_a)
	b2 = Benchmark.objects.filter(algorithm_tool=at_b)
	
	# Only keep Benchmarks whose Model appears in both sets.
	b1 = b1.filter(model__in=[b.model.pk for b in b2])
	b2 = b2.filter(model__in=[b.model.pk for b in b1])
	
	# Filter the selected options from Benchmark sets.
	b1 = benchFind(b1,[o.id for o in c.optionvalue_a.all()])
	b2 = benchFind(b2,[o.id for o in c.optionvalue_b.all()])
	
	# Calculate the average values for Models that occur more than once in a set.
	# model_list collects the distinct Model pks; after the mutual filtering
	# above, b1 and b2 contain the same Models, so walking b2 suffices.
	model_list = []
	for b in b2:
		if b.model.pk not in model_list:
			model_list.append(b.model.pk)
	t1avg = []
	t2avg = []
	m1avg = []
	m2avg = []
	for model in model_list:
		tmp1 = []
		for bench in b1:
			if bench.model.pk == model:
				tmp1.append(bench)
		tmp2 = []
		for bench in b2:
			if bench.model.pk == model:
				tmp2.append(bench)
		if tmp1 and tmp2:
			x=[(float(b.total_time)) for b in tmp1]
			t1avg.append(float(sum(x))/len(x))
			x=[(float(b.total_time)) for b in tmp2]
			t2avg.append(float(sum(x))/len(x))
			x=[(float(b.memory_VSIZE)) for b in tmp1]
			m1avg.append(float(sum(x))/len(x))
			x=[(float(b.memory_VSIZE)) for b in tmp2]
			m2avg.append(float(sum(x))/len(x))

	# Check for empty set
	if len(t1avg) != 0:
		
		# Use the averages, in case any doubles occur
		t1 = t1avg
		t2 = t2avg
		
		# Color mask: if t1[i] < t2[i] --> red dot in graph; else blue dot
		mask = []
		for index in range(len(t1)):
			if t1[index] < t2[index]:
				mask.append(cc.to_rgb('red'))
			else:
				mask.append(cc.to_rgb('blue'))
		
		# Draw a y = x reference line from .001 up to the first power of 10 greater
		# than the maximum value; two endpoints suffice, since y = x stays straight
		# on log-log axes (this also avoids np.arange, which was never imported)
		max_value_t = max(max(t1avg), max(t2avg))
		max_value_t = math.pow(10, math.ceil(math.log10(max_value_t)))
		ax.plot([.001, max_value_t], [.001, max_value_t], 'k-')
		
		# Plot data
		ax.scatter(t1, t2, s=10, color=mask, marker='o')
		
		# Axes mark-up
		ax.set_xscale('log')
		ax.set_yscale('log')
		ax.set_xlabel(printlabel(at_a,c.optionvalue_a), color='red')
		ax.set_ylabel(printlabel(at_b,c.optionvalue_b), color='blue')
		ax.set_title('Runtime (s)', size='small')
		ax.grid(True)
			
		# -------- Plotting memory data starts here in a new subplot ---------
		ax=fig.add_subplot(212)
		
		# Average memory values.
		m1 = m1avg
		m2 = m2avg
		
		# Color mask again: red when the x-axis value is smaller, matching the runtime plot
		mask = []
		for index in range(len(m1)):
			if m1[index] < m2[index]:
				mask.append(cc.to_rgb('red'))
			else:
				mask.append(cc.to_rgb('blue'))
		
		# Draw the y = x reference line again, endpoints only
		max_value_m = max(max(m1), max(m2))
		max_value_m = math.pow(10, math.ceil(math.log10(max_value_m)))
		ax.plot([.001, max_value_m], [.001, max_value_m], 'k-')
		
		# Axes mark-up
		ax.set_xscale('log')
		ax.set_yscale('log')
		ax.set_xlabel(printlabel(at_a,c.optionvalue_a), color='red')
		ax.set_ylabel(printlabel(at_b,c.optionvalue_b), color='blue')
		ax.grid(True)
		
		# Plot data
		ax.scatter(m1,m2,s=10,color=mask,marker='o')
		ax.set_title('Memory VSIZE (kb)', size='small')
	# Result set is empty
	else: 
		ax=fig.add_subplot(211)
		ax.set_title('Runtime (s)', size='small')
		ax.text(0.3,0.5,"Empty result set.") 
		ax=fig.add_subplot(212)
		ax.set_title('Memory VSIZE (kb)', size='small')
		ax.text(0.3,0.5,"Empty result set.")
	
	# Output graph
	fig.set_size_inches(5,10)
	canvas = FigureCanvas(fig)
	response = graph.export(canvas, c.name, format)
	return response
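
Note: all three views hand their figure to graph.export(canvas, name, format) to build the HTTP response. That helper is not part of this listing; below is a minimal sketch of what it plausibly does, assuming it streams the canvas into a Django HttpResponse via canvas.print_figure (the content-type table and filename scheme are illustrative assumptions, not the project's actual code):

def export(canvas, name, format='png'):
    # Hypothetical sketch of graph.export; the real BeAT helper may differ.
    from django.http import HttpResponse
    content_types = {
        'png': 'image/png',
        'pdf': 'application/pdf',
        'ps': 'application/postscript',
        'eps': 'application/postscript',
        'svg': 'image/svg+xml',
    }
    response = HttpResponse(content_type=content_types.get(format, 'image/png'))
    response['Content-Disposition'] = 'attachment; filename=%s.%s' % (name, format)
    # HttpResponse is file-like, so the Agg canvas can render straight into it.
    canvas.print_figure(response, format=format)
    return response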