예제 #1
0
def slowdown_cdf_big(args, datas):
    """Plot empirical slowdown CDFs for each data set on a wide (1x-50x) axis.

    args.args[0], when present, supplies the L parameter for
    lnm.compute_lnm_times; otherwise L defaults to 0.  One curve is drawn
    per (data set, series) pair, styled by LINESTYLES/LABELS/COLORS.
    """
    args = args.args
    L = int(args[0]) if args else 0  # L-step parameter; 0 when not supplied

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        means = data.means
        # Normalize every configuration (row) by row 0, the baseline config.
        slowdowns = means / means[0, :]
        graph = lnm.fromkeyvals(data.names, slowdowns)
        graph = lnm.compute_lnm_times(graph, L)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = means.shape[0]  # number of configurations

        for i, result in enumerate(results):
            if i == 1:
                continue  # series 1 is deliberately omitted from this plot
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1],
                    cdf,
                    LINESTYLES[number],
                    label=LABELS[i],
                    color=COLORS[i])

        upper = 50  # x-axis upper bound (slowdown factor)

        plt.xlim((1, upper))
        # Tick at 1x plus every 5x up to the upper bound (Python 2: range
        # returns a list, so list concatenation works here).
        ax.set_xticks([1] + range(5, upper + 1, 5))
        ax.set_xticklabels(["1x"] +
                           ["%dx" % i for i in range(5, upper + 1, 5)])
        plt.ylim((0, 100))
예제 #2
0
def slowdown_cdf_big(args, datas):
    """Plot empirical slowdown CDFs for each data set on a wide (1x-50x) axis.

    args.args[0], when present, supplies the L parameter for
    lnm.compute_lnm_times; otherwise L defaults to 0.
    """
    args = args.args
    L = int(args[0]) if args else 0  # L-step parameter; 0 when not supplied

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        means = data.means
        # Normalize every configuration (row) by row 0, the baseline config.
        slowdowns = means / means[0,:]
        graph = lnm.fromkeyvals(data.names, slowdowns)
        graph = lnm.compute_lnm_times(graph, L)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = means.shape[0]  # number of configurations

        for i, result in enumerate(results):
            if i == 1:
                continue  # series 1 is deliberately omitted from this plot
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1], cdf, LINESTYLES[number], label=LABELS[i], color=COLORS[i])

        upper = 50  # x-axis upper bound (slowdown factor)

        plt.xlim((1,upper))
        # Tick at 1x plus every 5x (Python 2: range returns a list).
        ax.set_xticks([1] + range(5, upper + 1, 5))
        ax.set_xticklabels(["1x"] + ["%dx" % i for i in range(5, upper + 1, 5)])
        plt.ylim((0, 100))
예제 #3
0
def slowdown_cdf(args, datas):
    """Plot slowdown CDFs for one or more L values over stacked data sets.

    args.args lists the L values (default [0]); args.xmin/args.xmax bound
    the x axis (defaults 1 and 10); args.norm selects the normalization
    column(s); args.systems optionally restricts which columns are plotted.
    """
    assert datas
    if not args.args:
        LS = [0]
    else:
        LS = map(int, args.args)  # Python 2: map returns a list

    if args.xmin is None:
        xmin = 1
    else:
        xmin = args.xmin

    if args.xmax is None:
        xmax = 10
    else:
        xmax = args.xmax

    names = datas[0].names
    # Stack the per-data-set mean matrices side by side (same rows assumed).
    data = np.hstack([d.means for d in datas])

    norm = args.norm and args.norm[0]
    if norm is None or norm == -1:
        # No explicit norm column: normalize each column by its own row-0 value.
        norm = range(data.shape[-1])
    else:
        assert norm >= 0
    slowdowns = data / data[0, norm]

    systems = args.systems
    if systems is not None:
        slowdowns = slowdowns[:, systems]  # keep only the selected columns

    colors = colors_array(args)
    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number in LS:

        graph = lnm.fromkeyvals(names, slowdowns)
        graph = lnm.compute_lnm_times(graph, number)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = data.shape[0]  # number of configurations

        for i, result in enumerate(results):
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1],
                    cdf,
                    LINESTYLES[number],
                    label=LABELS[i],
                    color=colors[i])

    # plt.axvline(3, color=VLINE)
    plt.xlim((xmin, xmax))
    ax.set_xticks(range(1, xmax + 1))
    ax.set_xticklabels(["%dx" % (i + 1) for i in range(xmax)])
    plt.ylim((0, 100))
예제 #4
0
def slowdown_cdf(args, datas):
    """Plot slowdown CDFs for one or more L values over stacked data sets.

    args.args lists the L values (default [0]); args.norm selects the
    normalization column(s); args.systems optionally restricts which
    columns are plotted.  X axis is fixed to 1x-10x with a vline at 3x.
    """
    assert datas
    if not args.args:
        LS = [0]
    else:
        LS = map(int, args.args)  # Python 2: map returns a list

    names = datas[0].names
    # Stack the per-data-set mean matrices side by side (same rows assumed).
    data = np.hstack([d.means for d in datas])

    norm = args.norm and args.norm[0]
    if norm is None or norm == -1:
        # No explicit norm column: normalize each column by its own row-0 value.
        norm = range(data.shape[-1])
    else:
        assert norm >= 0
    slowdowns = data / data[0,norm]

    systems = args.systems
    if systems is not None:
        slowdowns = slowdowns[:,systems]  # keep only the selected columns

    colors = colors_array(args)
    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number in LS:

        graph = lnm.fromkeyvals(names, slowdowns)
        graph = lnm.compute_lnm_times(graph, number)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = data.shape[0]  # number of configurations

        for i, result in enumerate(results):
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1], cdf, LINESTYLES[number], label=LABELS[i], color=colors[i])

    upper = 10  # fixed x-axis upper bound (slowdown factor)
    plt.axvline(3, color=VLINE)  # marker at the 3x "acceptable" threshold
    plt.xlim((1,upper))
    ax.set_xticks(range(1, upper + 1))
    ax.set_xticklabels(["%dx" % (i + 1) for i in range(upper)])
    plt.ylim((0, 100))
예제 #5
0
def slowdown_cdf_hidden(args, datas):
    """Plot slowdown CDFs (L fixed at 0) with an adjustable x-axis bound.

    args.args[0], when present, sets the x-axis upper bound (default 5).
    args.systems, when set, limits which series are drawn.  Also prints the
    per-column counts of configurations with slowdown below 3.0.
    """

    if args.args:
        upper = int(args.args[0])  # x-axis upper bound (slowdown factor)
    else:
        upper = 5
    L = 0  # L parameter is fixed for this plot

    # NOTE: shadows any module-level LINESTYLES for the duration of this call.
    LINESTYLES = ['-', ':']

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        means = data.means
        # Normalize every configuration (row) by row 0, the baseline config.
        slowdowns = means / means[0, :]
        graph = lnm.fromkeyvals(data.names, slowdowns)
        graph = lnm.compute_lnm_times(graph, L)

        # Diagnostic: how many configurations fall under 3x, per column.
        print np.sum(slowdowns < 3.0, axis=0)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = means.shape[0]  # number of configurations

        for i, result in enumerate(results):
            if args.systems is not None and i not in args.systems:
                continue  # series not selected by --systems
            median = np.median(result)  # NOTE: computed but unused
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1],
                    cdf,
                    LINESTYLES[number],
                    label=LABELS[i],
                    color=COLORS[i])

        # plt.axvline(3, color=VLINE)
        plt.xlim((1, upper))
        ax.set_xticks(range(1, upper + 1))
        ax.set_xticklabels(["%dx" % (i + 1) for i in range(upper)])
        plt.ylim((0, 100))
예제 #6
0
def slowdown_cdf_hidden(args, datas):
    """Plot slowdown CDFs (L fixed at 0) with an adjustable x-axis bound.

    args.args[0], when present, sets the x-axis upper bound (default 5).
    args.systems, when set, limits which series are drawn.  Also prints the
    per-column counts of configurations with slowdown below 3.0.
    """

    if args.args:
        upper = int(args.args[0])  # x-axis upper bound (slowdown factor)
    else:
        upper = 5
    L = 0  # L parameter is fixed for this plot

    # NOTE: shadows any module-level LINESTYLES for the duration of this call.
    LINESTYLES = ['-', ':']

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        means = data.means
        # Normalize every configuration (row) by row 0, the baseline config.
        slowdowns = means / means[0,:]
        graph = lnm.fromkeyvals(data.names, slowdowns)
        graph = lnm.compute_lnm_times(graph, L)

        # Diagnostic: how many configurations fall under 3x, per column.
        print np.sum(slowdowns < 3.0, axis=0)

        results = graph.ungraph()[1]
        results = zip(*results)  # transpose: one tuple of values per series
        entries = means.shape[0]  # number of configurations

        for i, result in enumerate(results):
            if args.systems is not None and i not in args.systems:
                continue  # series not selected by --systems
            median = np.median(result)  # NOTE: computed but unused
            # Histogram then cumulative sum yields an empirical CDF in percent.
            counts, bin_edges = np.histogram(result, bins=max(entries, 1024))
            counts = counts * (100.0 / float(entries))
            cdf = np.cumsum(counts)
            ax.plot(bin_edges[:-1], cdf, LINESTYLES[number], label=LABELS[i], color=COLORS[i])

        plt.axvline(3, color=VLINE)  # marker at the 3x threshold
        plt.xlim((1,upper))
        ax.set_xticks(range(1, upper + 1))
        ax.set_xticklabels(["%dx" % (i + 1) for i in range(upper)])
        plt.ylim((0, 100))
예제 #7
0
def slowdown_cdf(datas):
    """Produce the aggregate weighted-CDF figure plus per-data-set scatter plots.

    For each group of data sets in *datas*: computes weighted slowdown CDFs
    for L=0 and L=1 (and summary stats for L=2), prints a LaTeX summary
    table, and saves "figs/aggregate-cdf.pdf".  A second pass saves one
    scatter/regression plot per group as "figs/aggregate-slowdown-N.pdf".
    """

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        # Equal weight per configuration within each data set, so every
        # benchmark contributes equally regardless of its lattice size.
        weights = [
            np.array([1.0 / float(d.means.shape[0])] * d.means.shape[0])
            for d in data
        ]
        # Normalize each data set's rows by its own row 0 (baseline config).
        slowdowns = [d.means / d.means[0, :] for d in data]

        # L=1 variant of the slowdowns.
        graphs = [
            lnm.fromkeyvals(d.names, slowdown)
            for d, slowdown in zip(data, slowdowns)
        ]
        graphs = [lnm.compute_lnm_times(g, L=1) for g in graphs]
        slowdowns1 = [g.ungraph()[1] for g in graphs]

        # L=2 variant of the slowdowns (rebuilt from the same inputs).
        graphs = [
            lnm.fromkeyvals(d.names, slowdown)
            for d, slowdown in zip(data, slowdowns)
        ]
        graphs = [lnm.compute_lnm_times(g, L=2) for g in graphs]
        slowdowns2 = [g.ungraph()[1] for g in graphs]

        largest_dims = max(*[s.shape[-1] for s in slowdowns])  # NOTE: unused

        # Pad all variants to a common column count; keep the weights from
        # the final call (all three calls receive the same weights input).
        _, slowdowns = pad_weights(weights, slowdowns)
        _, slowdowns1 = pad_weights(weights, slowdowns1)
        weights, slowdowns2 = pad_weights(weights, slowdowns2)

        weights = np.vstack(weights)
        all_data = np.vstack(slowdowns)
        all_data1 = np.vstack(slowdowns1)
        all_data2 = np.vstack(slowdowns2)

        N = all_data.shape[-1]  # NOTE: unused
        for i in range(2):
            entries = np.sum(weights[:, i])  # total weight in column i
            result = all_data[:, i]
            # Weighted histogram -> cumulative sum -> CDF in percent.
            counts, bin_edges = np.histogram(result,
                                             bins=len(result),
                                             weights=weights[:, i])
            # np.sum(entries) is a no-op here; entries is already a scalar.
            cdf = np.cumsum(counts) / np.sum(entries) * 100.0
            ax.plot(bin_edges[:-1],
                    cdf,
                    LINESTYLES[number],
                    label=LABELS[i],
                    color=COLORS[i])

        # add lines for L=1
        # NOTE: i and entries leak from the loop above (i == 1 here).
        result = all_data1[:, i]
        counts, bin_edges = np.histogram(result,
                                         bins=len(result),
                                         weights=weights[:, 1])
        cdf = np.cumsum(counts) / entries * 100.0
        ax.plot(bin_edges[:-1],
                cdf,
                LINESTYLES[number],
                label=LABELS[i],
                color=(0, 0, 0))

        # Per-column summary statistics for the LaTeX table below.
        total_benchmarks = np.sum(weights, axis=0)
        avg_slowdown_weighted = np.sum(weights * all_data,
                                       axis=0) / total_benchmarks
        avg_slowdown_weighted1 = np.sum(weights * all_data1,
                                        axis=0) / total_benchmarks
        # Percent of weighted configurations under 2.0x (L=0 and L=1) ...
        s3 = np.sum(weights *
                    (all_data < 2.0), axis=0) * 100.0 / total_benchmarks
        s4 = np.sum(weights *
                    (all_data1 < 2.0), axis=0) * 100.0 / total_benchmarks
        # ... and under 1.1x (L=0, L=1, L=2).
        s5 = np.sum(weights *
                    (all_data < 1.1), axis=0) * 100.0 / total_benchmarks
        s6 = np.sum(weights *
                    (all_data1 < 1.1), axis=0) * 100.0 / total_benchmarks
        s7 = np.sum(weights *
                    (all_data2 < 1.1), axis=0) * 100.0 / total_benchmarks

        def rnd(x):
            # Round to zero decimal places (still returns a float).
            return round(x, 0)

        # Emit one LaTeX table section per group (header row after the first).
        if number != 0:
            print "\multicolumn{8}{|c|}{%s} \\\\" % SUFFIXES[number]
            print "\\hline"
        for i in range(len(avg_slowdown_weighted)):
            s1 = round(avg_slowdown_weighted[i], 2)
            s2 = round(avg_slowdown_weighted1[i], 2)
            print "%s & $%0.2f\\times$ & $%0.2f\\times$ & $%0.0f$ & $%0.0f$ & $%0.0f$ & $%0.0f$ & $%0.0f$ \\\\" % (
                (LABELS[i].capitalize(), s1, s2) +
                tuple(map(rnd, (s3[i], s4[i], s5[i], s6[i], s7[i]))))
        print "\\hline"

    plt.axvline(3, color=COLORS[-1])  # marker at the 3x threshold
    plt.xlim((1, 10))

    ax.set_xlabel("slowdown factor")
    ax.set_ylabel("% of benchmarks")
    ax.set_xticklabels(["%dx" % (i + 1) for i in range(10)])
    plt.ylim((0, 100))
    plt.savefig("figs/aggregate-cdf.pdf")

    # Second pass: scatter of each series against series 0, with a linear fit.
    for number, data in enumerate(datas):
        plt.cla()  # reuse the same axes, cleared between groups
        weights = [np.ones(d.means.shape[0]) / d.means.shape[0] for d in data]
        # Normalize by the single baseline cell (row 0, column 0).
        slowdowns = [d.means / d.means[0, 0] for d in data]

        weights, slowdowns = pad_weights(weights, slowdowns)
        weights = np.vstack(weights)
        all_data = np.vstack(slowdowns)

        for i in range(0, 2):
            ax.scatter(all_data[:, 0],
                       all_data[:, i],
                       label=LABELS[i],
                       color=COLORS[i],
                       marker='.')
            if i == 0:
                continue  # no regression for the baseline against itself
            # m, b = np.polyfit(all_data[:,0], all_data[:,i], 1)
            m, b, r, _, _ = linregress(all_data[:, 0], all_data[:, i])
            # Dense x grid for drawing the fitted line (second column unused
            # by y = m*x + b but kept in the array as written).
            x = np.vstack([np.arange(0, 100, 0.01), np.ones(10000)]).T
            print "y = %f * x + %f : r^2 = %f" % (m, b, r**2)
            y = m * x + b
            ax.plot(x, y, color=COLORS[i + 2])
            # Annotate the fit roughly in the middle of the data cloud.
            textX = np.max(all_data[:, 0]) / 2.0
            textY = np.max(all_data[:, i]) / 1.8
            plt.text(textX,
                     textY,
                     '$y = %0.3f x + %0.3f$ \n $r^2 = %0.3f$' % (m, b, r**2),
                     fontsize=20,
                     color='k',
                     horizontalalignment='center',
                     verticalalignment='bottom')

        plt.legend(loc='upper left')
        plt.ylim((0, 70))
        plt.xlim((0, 70))
        ax.set_xlabel("Racket gradual typing overhead")
        ax.set_ylabel("overhead relative to Racket")
        plt.savefig("figs/aggregate-slowdown-%d.pdf" % number)
def slowdown_cdf(datas):
    """Produce the aggregate weighted-CDF figure plus per-data-set scatter plots.

    For each group of data sets in *datas*: computes weighted slowdown CDFs
    for L=0 and L=1 (and summary stats for L=2), prints a LaTeX summary
    table, and saves "figs/aggregate-cdf.pdf".  A second pass saves one
    scatter/regression plot per group as "figs/aggregate-slowdown-N.pdf".
    """

    fig, ax = plt.subplots(nrows=1, ncols=1)
    for number, data in enumerate(datas):
        # Equal weight per configuration within each data set.
        weights   = [np.array([1.0 / float(d.means.shape[0])] * d.means.shape[0]) for d in data]
        # Normalize each data set's rows by its own row 0 (baseline config).
        slowdowns = [d.means / d.means[0,:] for d in data]

        # L=1 variant of the slowdowns.
        graphs = [lnm.fromkeyvals(d.names, slowdown) for d, slowdown in zip(data, slowdowns)]
        graphs = [lnm.compute_lnm_times(g, L=1) for g in graphs]
        slowdowns1 = [g.ungraph()[1] for g in graphs]

        # L=2 variant of the slowdowns (rebuilt from the same inputs).
        graphs = [lnm.fromkeyvals(d.names, slowdown) for d, slowdown in zip(data, slowdowns)]
        graphs = [lnm.compute_lnm_times(g, L=2) for g in graphs]
        slowdowns2 = [g.ungraph()[1] for g in graphs]

        largest_dims = max(*[s.shape[-1] for s in slowdowns])  # NOTE: unused

        # Pad all variants to a common column count; keep the weights from
        # the final call (all three calls receive the same weights input).
        _, slowdowns = pad_weights(weights, slowdowns)
        _, slowdowns1 = pad_weights(weights, slowdowns1)
        weights, slowdowns2 = pad_weights(weights, slowdowns2)

        weights   = np.vstack(weights)
        all_data  = np.vstack(slowdowns)
        all_data1 = np.vstack(slowdowns1)
        all_data2 = np.vstack(slowdowns2)

        N = all_data.shape[-1]  # NOTE: unused
        for i in range(2):
            entries = np.sum(weights[:,i])  # total weight in column i
            result = all_data[:,i]
            # Weighted histogram -> cumulative sum -> CDF in percent.
            counts, bin_edges = np.histogram(result, bins=len(result), weights=weights[:,i])
            # np.sum(entries) is a no-op here; entries is already a scalar.
            cdf = np.cumsum(counts) / np.sum(entries) * 100.0
            ax.plot(bin_edges[:-1], cdf, LINESTYLES[number], label=LABELS[i], color=COLORS[i])

        # add lines for L=1
        # NOTE: i and entries leak from the loop above (i == 1 here).
        result = all_data1[:,i]
        counts, bin_edges = np.histogram(result, bins=len(result), weights=weights[:,1])
        cdf = np.cumsum(counts) / entries * 100.0
        ax.plot(bin_edges[:-1], cdf, LINESTYLES[number], label=LABELS[i], color=(0,0,0))

        # Per-column summary statistics for the LaTeX table below:
        # weighted mean slowdowns, then percent of weighted configurations
        # under 3.0x (L=0, L=1) and under 1.1x (L=0, L=1, L=2).
        total_benchmarks = np.sum(weights, axis=0)
        avg_slowdown_weighted  = np.sum(weights * all_data, axis=0)  / total_benchmarks
        avg_slowdown_weighted1 = np.sum(weights * all_data1, axis=0) / total_benchmarks
        s3 = np.sum(weights * (all_data < 3.0)  , axis=0) * 100.0  / total_benchmarks
        s4 = np.sum(weights * (all_data1 < 3.0) , axis=0) * 100.0  / total_benchmarks
        s5 = np.sum(weights * (all_data < 1.1)  , axis=0) * 100.0  / total_benchmarks
        s6 = np.sum(weights * (all_data1 < 1.1) , axis=0) * 100.0  / total_benchmarks
        s7 = np.sum(weights * (all_data2 < 1.1) , axis=0) * 100.0  / total_benchmarks

        def rnd(x):
            # Round to zero decimal places (still returns a float).
            return round(x, 0)

        # Emit one LaTeX table section per group (header row after the first).
        if number != 0:
            print "\multicolumn{8}{|c|}{%s} \\\\" % SUFFIXES[number]
            print "\\hline"
        for i in range(len(avg_slowdown_weighted)):
            s1 = round(avg_slowdown_weighted[i], 2)
            s2 = round(avg_slowdown_weighted1[i], 2)
            print "%s & $%0.2f\\times$ & $%0.2f\\times$ & $%0.0f$ & $%0.0f$ & $%0.0f$ & $%0.0f$ & $%0.0f$ \\\\" % ((LABELS[i].capitalize(), s1, s2) + tuple(map(rnd, (s3[i], s4[i], s5[i], s6[i], s7[i]))))
        print "\\hline"

    plt.axvline(3, color=COLORS[-1])  # marker at the 3x threshold
    plt.xlim((1,10))

    ax.set_xlabel("slowdown factor")
    ax.set_ylabel("% of benchmarks")
    ax.set_xticklabels(["%dx" % (i + 1) for i in range(10)])
    plt.ylim((0, 100))
    plt.savefig("figs/aggregate-cdf.pdf")

    # Second pass: scatter of each series against series 0, with a linear fit.
    for number, data in enumerate(datas):
        plt.cla()  # reuse the same axes, cleared between groups
        weights   = [np.ones(d.means.shape[0]) / d.means.shape[0] for d in data]
        # Normalize by the single baseline cell (row 0, column 0).
        slowdowns = [d.means / d.means[0,0] for d in data]

        weights, slowdowns = pad_weights(weights, slowdowns)
        weights  = np.vstack(weights)
        all_data = np.vstack(slowdowns)


        for i in range(0, 2):
            ax.scatter(all_data[:,0], all_data[:,i], label=LABELS[i], color=COLORS[i], marker='.')
            if i == 0:
                continue  # no regression for the baseline against itself
            # m, b = np.polyfit(all_data[:,0], all_data[:,i], 1)
            m, b, r, _, _ = linregress(all_data[:,0], all_data[:,i])
            # Dense x grid for drawing the fitted line (second column unused
            # by y = m*x + b but kept in the array as written).
            x = np.vstack([np.arange(0, 100, 0.01), np.ones(10000)]).T
            print "y = %f * x + %f : r^2 = %f" % (m, b, r ** 2)
            y = m * x + b
            ax.plot(x, y, color=COLORS[i+2])
            # Annotate the fit roughly in the middle of the data cloud.
            textX = np.max(all_data[:,0]) / 2.0
            textY = np.max(all_data[:,i]) / 1.8
            plt.text(textX, textY, '$y = %0.3f x + %0.3f$ \n $r^2 = %0.3f$' % (m, b, r**2), fontsize=20,
                     color='k',
                     horizontalalignment='center',
                     verticalalignment='bottom')


        plt.legend(loc='upper left')
        plt.ylim((0, 70))
        plt.xlim((0, 70))
        ax.set_xlabel("Racket gradual typing overhead")
        ax.set_ylabel("overhead relative to Racket")
        plt.savefig("figs/aggregate-slowdown-%d.pdf" % number)