Пример #1
0
def CmpNormalModelWithDataSample():
    """Compare a fitted normal model against the empirical birth-weight data.

    Pools all baby weights, fits a normal distribution via the sample mean
    and variance, then overlays the model CDF on the empirical CDF.
    """
    firsts, others, babies = Babies.PartitionBabies()
    weight_list = Babies.GetWightList(babies)
    weight_pmf = Pmf.MakePmfFromList(weight_list)
    mu = weight_pmf.Mean()
    var = weight_pmf.Var(mu)
    sigma = math.sqrt(var)
    print("mu = {}, var = {}, sigma = {}".format(mu, var, sigma))

    # Empirical distribution (the data itself)
    empirical_cdf = Cdf.MakeCdfFromPmf(weight_pmf, name='data')
    myplot.cdf(empirical_cdf)

    # Model: normal CDF evaluated from mu/sigma via the error function
    xs, _ = weight_pmf.Render()
    model_ys = [erf.NormalCdf(x, mu, sigma) for x in xs]
    myplot.Plot(xs, model_ys, label='Model')
    myplot.Show()
    myplot.Clf()
Пример #2
0
    def save_plots(self, outdir, tag=None):
        """Write this analyzer's standard plots to outdir.

        Produces one CDF plot per entry in the class-level CDFS table, plus a
        bar chart of data usage (kB counters scaled to GB). Output files are
        named '<tag>_<name>.pdf'.
        """
        # One CDF plot per spec in AwazzaLogAnalyzer.CDFS.
        for spec in AwazzaLogAnalyzer.CDFS:
            out_path = os.path.join(outdir, '%s_%s.pdf' % (tag, spec['filename']))
            myplot.cdf(spec['data'](self),
                       numbins=spec['numbins'],
                       xlabel=spec['xlabel'],
                       labels=spec['labels'],
                       filename=out_path,
                       **spec['kwargs'])

        # Data usage bar chart: selected request counters, kB -> GB.
        usage_keys = ('kb-from-origin', 'kb-to-client', 'kb-cached',
                      'kb-compressed', 'kb-lost-client-closed')
        bar_labels = ('From Origin', 'To Client', 'Cached', 'Compressed', 'Closed Early')
        bar_values = tuple(self._request_ints[key] / 1000000.0 for key in usage_keys)
        myplot.plot([bar_labels], [bar_values], type='bar', label_bars=True,
                    ylabel='Data (GB)', bar_padding=0.5, barwidth=0.5,
                    filename=os.path.join(outdir, '%s_%s.pdf' % (tag, 'data_usage')))
Пример #3
0
def main():
    if len(args.hars) == 1 and os.path.isdir(args.hars[0]):
        harpaths = glob.glob(args.hars[0] + '/*.har')
    else:
        harpaths = args.hars
        
        
    sizes = []
    num_zeros = 0
    for harpath in harpaths:
        # load HAR
        try:
            har = Har.from_file(harpath)
            logging.info(har)
        except HarError:
            logging.exception('Error parsing HAR')
            continue

        # store object sizes in array
        for obj in har.objects:
            sizes.append(obj.size)
            if obj.size == 0:
                num_zeros += 1

    print num_zeros, 'zeros'
    print 'Mean:\t\t%f B' % numpy.mean(sizes)
    print 'Min: ', numpy.min(sizes)
    for percentile in (1, 10, 25, 50, 75, 90, 99):
        print '%dth percentile:\t%f B' % (percentile,\
            numpy.percentile(sizes, percentile))
    print 'Max: ', numpy.max(sizes)

    myplot.cdf([numpy.array(sizes)/1024.0], xlabel='Object Size (kB)',\
        title='Object Sizes in Alexa Top 500', filename='./object_size_cdf.pdf')
    
    myplot.cdf([sizes], xlabel='Object Size (kB)', xscale='log',\
        title='Object Sizes in Alexa Top 500', filename='./object_size_cdf_log.pdf')
Пример #4
0
def main():
    if len(args.hars) == 1 and os.path.isdir(args.hars[0]):
        harpaths = glob.glob(args.hars[0] + '/*.har')
    else:
        harpaths = args.hars

    sizes = []
    num_zeros = 0
    for harpath in harpaths:
        # load HAR
        try:
            har = Har.from_file(harpath)
            logging.info(har)
        except HarError:
            logging.exception('Error parsing HAR')
            continue

        # store object sizes in array
        for obj in har.objects:
            sizes.append(obj.size)
            if obj.size == 0:
                num_zeros += 1

    print num_zeros, 'zeros'
    print 'Mean:\t\t%f B' % numpy.mean(sizes)
    print 'Min: ', numpy.min(sizes)
    for percentile in (1, 10, 25, 50, 75, 90, 99):
        print '%dth percentile:\t%f B' % (percentile,\
            numpy.percentile(sizes, percentile))
    print 'Max: ', numpy.max(sizes)

    myplot.cdf([numpy.array(sizes)/1024.0], xlabel='Object Size (kB)',\
        title='Object Sizes in Alexa Top 500', filename='./object_size_cdf.pdf')

    myplot.cdf([sizes], xlabel='Object Size (kB)', xscale='log',\
        title='Object Sizes in Alexa Top 500', filename='./object_size_cdf_log.pdf')
Пример #5
0
sys.path.append('../../tools/myplot')
import myplot


##
## context plots
##

# The results file stores a Python literal (list of per-context means) on its
# second-to-last line.
# NOTE(review): eval() on file contents executes arbitrary code if the file is
# untrusted; ast.literal_eval would be safer if the data is plain literals.
mean_configs_per_user_across_contexts = []
with open('context_results', 'r') as f:
    lines = f.readlines()
    mean_configs_per_user_across_contexts = eval(lines[-2].strip())
# (removed the no-op 'f.closed' expression; the 'with' block already closes f)

myplot.cdf([mean_configs_per_user_across_contexts], height_scale=0.6,
    xlabel='Mean # Unique Configurations per Context across 1000 Users',
    filename='configs_per_user.pdf')


##
## conflict plots
##

mean_conflicts_per_app_across_contexts = []
num_contexts_per_app_with_any_conflict = []
with open('conflict_results2', 'r') as f:
    lines = f.readlines()
    mean_conflicts_per_app_across_contexts = eval(lines[-4].strip())
    num_contexts_per_app_with_any_conflict = eval(lines[-2].strip())
Пример #6
0
def plot_results(filename_to_results, filenames=None):
    """Plot HTTPS-vs-HTTP load-time inflation results.

    filename_to_results: dict mapping results-file name -> iterable of result
        records (each carries status, url, http/https mean/median load times,
        size, and http_stddev).
    filenames: optional explicit ordering of the files to process (controls
        series order on the plots).
    Writes several CDF and scatter PDFs under args.outdir, tagged args.tag.
    """
    # use the filenames list to make sure we process files in order
    # (so we can control the order of the series on the plot)
    if not filenames: filenames = filename_to_results.keys()

    # per-file buckets: filename -> metric name -> list of values
    filename_to_data = defaultdict(lambda: defaultdict(list))
    fraction_data = []
    fraction_labels = []
    absolute_data = []
    absolute_labels = []
    mean_percents_by_size = []
    mean_absolutes_by_size = []
    mean_by_size_xs = []
    mean_by_size_ys = []
    mean_by_size_yerrs = []
    mean_by_size_labels = []

    for filename in filenames:
        results = filename_to_results[filename]
        for r in results:
            if r.status == SUCCESS:
                filename_to_data[filename]['both_success'].append(r.url)

                filename_to_data[filename]['mean_percent_inflations'].append(r.https_mean / r.http_mean)
                filename_to_data[filename]['mean_absolute_inflations'].append(r.https_mean - r.http_mean)
                filename_to_data[filename]['median_percent_inflations'].append(r.https_median / r.http_median)
                filename_to_data[filename]['median_absolute_inflations'].append(r.https_median - r.http_median)
                if r.size:
                    # (size in kB, inflation, stddev) triples for scatter plots
                    filename_to_data[filename]['mean_percent_by_size'].append( (r.size/1000.0, r.https_mean / r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_absolute_by_size'].append( (r.size/1000.0, r.https_mean - r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_http_by_size'].append( (r.size/1000.0, r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_https_by_size'].append( (r.size/1000.0, r.https_mean, r.http_stddev) )
            elif r.status == FAILURE_NO_HTTP:
                filename_to_data[filename]['no_http'].append(r.url)
            elif r.status == FAILURE_NO_HTTPS:
                filename_to_data[filename]['no_https'].append(r.url)
            else:
                filename_to_data[filename]['other_error'].append(r.url)

        print '%i sites were accessible over both protocols' %\
            len(filename_to_data[filename]['both_success'])
        print '%i sites were not accessible over HTTP' %\
            len(filename_to_data[filename]['no_http'])
        print '%i sites were not accessible over HTTPS' %\
            len(filename_to_data[filename]['no_https'])
        print '%i sites were not accessible for other reasons' %\
            len(filename_to_data[filename]['other_error'])

        # infer the measurement location from the results-file name
        if 'pit' in filename:
            location = 'PIT'
        elif '3g' in filename:
            location = '3G'
        else:
            location = 'Fiber'

        fraction_data.append(filename_to_data[filename]['mean_percent_inflations'])
        fraction_labels.append('Mean (%s)' % location)
        fraction_data.append(filename_to_data[filename]['median_percent_inflations'])
        fraction_labels.append('Median (%s)' % location)

        absolute_data.append(numpy.array(filename_to_data[filename]['mean_absolute_inflations']))# * 1000)  # s -> ms
        absolute_labels.append('Mean (%s)' % location)
        absolute_data.append(numpy.array(filename_to_data[filename]['median_absolute_inflations']))# * 1000)  # s -> ms
        absolute_labels.append('Median (%s)' % location)

        # NOTE: zip(...)[N] requires Python 2, where zip returns a list;
        # under Python 3 this would raise (zip objects aren't subscriptable).
        try:
            mean_by_size_xs.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[0])
            mean_by_size_ys.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[1])
            mean_by_size_yerrs.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[2])
            mean_by_size_labels.append('Mean HTTP (%s)' % location)
            mean_by_size_xs.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[0])
            mean_by_size_ys.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[1])
            mean_by_size_yerrs.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[2])
            mean_by_size_labels.append('Mean HTTPS (%s)' % location)
        except Exception as e:
            logging.warn('Error processing size data: %s' % e)

        # NOTE(review): location is only ever 'PIT', '3G', or 'Fiber' above,
        # so this branch looks unreachable — confirm intent.
        if location == 'BCN':
            mean_percents_by_size.append(filename_to_data[filename]['mean_percent_by_size'])
            mean_absolutes_by_size.append(filename_to_data[filename]['mean_absolute_by_size'])


    myplot.cdf(fraction_data,
        xlabel='Load Time Ratio (HTTPS/HTTP)', labels=fraction_labels,
        filename=os.path.join(args.outdir, '%s_fraction_inflation.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xlim=(1, 3), legend='lower right')

    myplot.cdf(absolute_data,
        xlabel='Load Time Difference (HTTPS-HTTP) [s]', labels=absolute_labels,
        filename=os.path.join(args.outdir, '%s_absolute_inflation.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xlim=(0,3), legend='lower right')

    myplot.cdf(absolute_data,
        xlabel='Load Time Difference (HTTPS-HTTP) [s]', labels=absolute_labels,
        filename=os.path.join(args.outdir, '%s_absolute_inflation_log.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xscale='log', xlim=(0, 10), legend='lower right')

    # Plot fraction and absolute in same figure as subplots
    fig, ax_array = myplot.subplots(1, 2, height_scale=0.75, width_scale=1.2)
    myplot.cdf(fraction_data, fig=fig, ax=ax_array[0],
        xlabel='Load Time Ratio\n(HTTPS/HTTP)', labels=fraction_labels,
        numbins=10000, xlim=(1, 3), show_legend=False)

    lines, labels = myplot.cdf(absolute_data, fig=fig, ax=ax_array[1],
        xlabel='Load Time Difference\n(HTTPS-HTTP) [s]', labels=absolute_labels,
        numbins=10000, xlim=(0,3), legend='lower right', labelspacing=0.1, handletextpad=0.4)

    # shrink plots to make room for legend underneath
    #for ax in ax_array:
    #    box = ax.get_position()
    #    ax.set_position([box.x0, box.y0 + box.height * 0.25,
    #             box.width, box.height * 0.75])

    # shrink plots to make room for title above
    for ax in ax_array:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0,
                 box.width, box.height * 0.95])

    #myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation_no_legend.pdf' % args.tag))
    #fig.legend(lines, labels, loc='lower center', ncol=2, prop={'size':20}, frameon=False,
    #    bbox_to_anchor=(.5, -.03))
    fig.suptitle('O-Proxy Top 2000 Objects')
    myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation.pdf' % args.tag))


    # size-based scatter plots; guarded because the *_by_size lists may be
    # empty (e.g. the unreachable 'BCN' branch above never fills the first two)
    try:
        myplot.plot([zip(*mean_percents_by_size[0])[0]], [zip(*mean_percents_by_size[0])[1]],
            xlabel='Object Size (KB)', ylabel='Fraction Inflation (HTTPS/HTTP)',
            linestyles=[''], xscale='log', 
            filename=os.path.join(args.outdir, '%s_fraction_by_size.pdf' % args.tag))
    
        myplot.plot([zip(*mean_absolutes_by_size[0])[0]], [zip(*mean_absolutes_by_size[0])[1]],
            xlabel='Object Size (KB)', ylabel='Absolute Inflation (HTTPS-HTTP) [sec]',
            linestyles=[''], xscale='log',
            filename=os.path.join(args.outdir, '%s_absolute_by_size.pdf' % args.tag))
    
        myplot.plot(mean_by_size_xs, mean_by_size_ys, yerrs=mean_by_size_yerrs,
            xlabel='Object Size (KB)', ylabel='Load Time [sec]', xscale='log',
            marker=None, labels=mean_by_size_labels,# legend='lower left',
            legend_cols=2, width_scale=2,
            filename=os.path.join(args.outdir, '%s_mean_lt_by_size.pdf' % args.tag))
    except Exception as e:
        logging.warn('Error processing size data: %s', e)
Пример #7
0
def plot_results(filename_to_results, filenames=None):
    """Plot HTTPS-vs-HTTP load-time inflation results.

    filename_to_results: dict mapping results-file name -> iterable of result
        records (each carries status, url, http/https mean/median load times,
        size, and http_stddev).
    filenames: optional explicit ordering of the files to process (controls
        series order on the plots).
    Writes several CDF and scatter PDFs under args.outdir, tagged args.tag.
    """
    # use the filenames list to make sure we process files in order
    # (so we can control the order of the series on the plot)
    if not filenames: filenames = filename_to_results.keys()

    # per-file buckets: filename -> metric name -> list of values
    filename_to_data = defaultdict(lambda: defaultdict(list))
    fraction_data = []
    fraction_labels = []
    absolute_data = []
    absolute_labels = []
    mean_percents_by_size = []
    mean_absolutes_by_size = []
    mean_by_size_xs = []
    mean_by_size_ys = []
    mean_by_size_yerrs = []
    mean_by_size_labels = []

    for filename in filenames:
        results = filename_to_results[filename]
        for r in results:
            if r.status == SUCCESS:
                filename_to_data[filename]['both_success'].append(r.url)

                filename_to_data[filename]['mean_percent_inflations'].append(
                    r.https_mean / r.http_mean)
                filename_to_data[filename]['mean_absolute_inflations'].append(
                    r.https_mean - r.http_mean)
                filename_to_data[filename]['median_percent_inflations'].append(
                    r.https_median / r.http_median)
                filename_to_data[filename][
                    'median_absolute_inflations'].append(r.https_median -
                                                         r.http_median)
                if r.size:
                    # (size in kB, inflation, stddev) triples for scatter plots
                    filename_to_data[filename]['mean_percent_by_size'].append(
                        (r.size / 1000.0, r.https_mean / r.http_mean,
                         r.http_stddev))
                    filename_to_data[filename]['mean_absolute_by_size'].append(
                        (r.size / 1000.0, r.https_mean - r.http_mean,
                         r.http_stddev))
                    filename_to_data[filename]['mean_http_by_size'].append(
                        (r.size / 1000.0, r.http_mean, r.http_stddev))
                    filename_to_data[filename]['mean_https_by_size'].append(
                        (r.size / 1000.0, r.https_mean, r.http_stddev))
            elif r.status == FAILURE_NO_HTTP:
                filename_to_data[filename]['no_http'].append(r.url)
            elif r.status == FAILURE_NO_HTTPS:
                filename_to_data[filename]['no_https'].append(r.url)
            else:
                filename_to_data[filename]['other_error'].append(r.url)

        print '%i sites were accessible over both protocols' %\
            len(filename_to_data[filename]['both_success'])
        print '%i sites were not accessible over HTTP' %\
            len(filename_to_data[filename]['no_http'])
        print '%i sites were not accessible over HTTPS' %\
            len(filename_to_data[filename]['no_https'])
        print '%i sites were not accessible for other reasons' %\
            len(filename_to_data[filename]['other_error'])

        # infer the measurement location from the results-file name
        if 'pit' in filename:
            location = 'PIT'
        elif '3g' in filename:
            location = '3G'
        else:
            location = 'Fiber'

        fraction_data.append(
            filename_to_data[filename]['mean_percent_inflations'])
        fraction_labels.append('Mean (%s)' % location)
        fraction_data.append(
            filename_to_data[filename]['median_percent_inflations'])
        fraction_labels.append('Median (%s)' % location)

        absolute_data.append(
            numpy.array(filename_to_data[filename]
                        ['mean_absolute_inflations']))  # * 1000)  # s -> ms
        absolute_labels.append('Mean (%s)' % location)
        absolute_data.append(
            numpy.array(filename_to_data[filename]
                        ['median_absolute_inflations']))  # * 1000)  # s -> ms
        absolute_labels.append('Median (%s)' % location)

        # NOTE: zip(...)[N] requires Python 2, where zip returns a list;
        # under Python 3 this would raise (zip objects aren't subscriptable).
        try:
            mean_by_size_xs.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [0])
            mean_by_size_ys.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [1])
            mean_by_size_yerrs.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [2])
            mean_by_size_labels.append('Mean HTTP (%s)' % location)
            mean_by_size_xs.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [0])
            mean_by_size_ys.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [1])
            mean_by_size_yerrs.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [2])
            mean_by_size_labels.append('Mean HTTPS (%s)' % location)
        except Exception as e:
            logging.warn('Error processing size data: %s' % e)

        # NOTE(review): location is only ever 'PIT', '3G', or 'Fiber' above,
        # so this branch looks unreachable — confirm intent.
        if location == 'BCN':
            mean_percents_by_size.append(
                filename_to_data[filename]['mean_percent_by_size'])
            mean_absolutes_by_size.append(
                filename_to_data[filename]['mean_absolute_by_size'])

    myplot.cdf(fraction_data,
               xlabel='Load Time Ratio (HTTPS/HTTP)',
               labels=fraction_labels,
               filename=os.path.join(args.outdir,
                                     '%s_fraction_inflation.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xlim=(1, 3),
               legend='lower right')

    myplot.cdf(absolute_data,
               xlabel='Load Time Difference (HTTPS-HTTP) [s]',
               labels=absolute_labels,
               filename=os.path.join(args.outdir,
                                     '%s_absolute_inflation.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xlim=(0, 3),
               legend='lower right')

    myplot.cdf(absolute_data,
               xlabel='Load Time Difference (HTTPS-HTTP) [s]',
               labels=absolute_labels,
               filename=os.path.join(
                   args.outdir, '%s_absolute_inflation_log.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xscale='log',
               xlim=(0, 10),
               legend='lower right')

    # Plot fraction and absolute in same figure as subplots
    fig, ax_array = myplot.subplots(1, 2, height_scale=0.75, width_scale=1.2)
    myplot.cdf(fraction_data,
               fig=fig,
               ax=ax_array[0],
               xlabel='Load Time Ratio\n(HTTPS/HTTP)',
               labels=fraction_labels,
               numbins=10000,
               xlim=(1, 3),
               show_legend=False)

    lines, labels = myplot.cdf(absolute_data,
                               fig=fig,
                               ax=ax_array[1],
                               xlabel='Load Time Difference\n(HTTPS-HTTP) [s]',
                               labels=absolute_labels,
                               numbins=10000,
                               xlim=(0, 3),
                               legend='lower right',
                               labelspacing=0.1,
                               handletextpad=0.4)

    # shrink plots to make room for legend underneath
    #for ax in ax_array:
    #    box = ax.get_position()
    #    ax.set_position([box.x0, box.y0 + box.height * 0.25,
    #             box.width, box.height * 0.75])

    # shrink plots to make room for title above
    for ax in ax_array:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width, box.height * 0.95])

    #myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation_no_legend.pdf' % args.tag))
    #fig.legend(lines, labels, loc='lower center', ncol=2, prop={'size':20}, frameon=False,
    #    bbox_to_anchor=(.5, -.03))
    fig.suptitle('O-Proxy Top 2000 Objects')
    myplot.save_plot(
        os.path.join(args.outdir, '%s_combined_inflation.pdf' % args.tag))

    # size-based scatter plots; guarded because the *_by_size lists may be
    # empty (e.g. the unreachable 'BCN' branch above never fills the first two)
    try:
        myplot.plot([zip(*mean_percents_by_size[0])[0]],
                    [zip(*mean_percents_by_size[0])[1]],
                    xlabel='Object Size (KB)',
                    ylabel='Fraction Inflation (HTTPS/HTTP)',
                    linestyles=[''],
                    xscale='log',
                    filename=os.path.join(
                        args.outdir, '%s_fraction_by_size.pdf' % args.tag))

        myplot.plot([zip(*mean_absolutes_by_size[0])[0]],
                    [zip(*mean_absolutes_by_size[0])[1]],
                    xlabel='Object Size (KB)',
                    ylabel='Absolute Inflation (HTTPS-HTTP) [sec]',
                    linestyles=[''],
                    xscale='log',
                    filename=os.path.join(
                        args.outdir, '%s_absolute_by_size.pdf' % args.tag))

        myplot.plot(
            mean_by_size_xs,
            mean_by_size_ys,
            yerrs=mean_by_size_yerrs,
            xlabel='Object Size (KB)',
            ylabel='Load Time [sec]',
            xscale='log',
            marker=None,
            labels=mean_by_size_labels,  # legend='lower left',
            legend_cols=2,
            width_scale=2,
            filename=os.path.join(args.outdir,
                                  '%s_mean_lt_by_size.pdf' % args.tag))
    except Exception as e:
        logging.warn('Error processing size data: %s', e)
Пример #8
0
def compare_results(result_files):
    '''Takes pickled result files (produced by analyze_traces) and plots stuff.

    Each result file holds a cPickle'd pair (url_to_plts, url_to_sizes):
    url -> list of page load times (seconds) and url -> list of sizes (bytes).
    Writes CDF plots of mean/median PLT and data usage under args.outdir.
    '''

    filename_to_plt_list_dict = {}  # filename -> url -> list of PLTs  (seconds)
    filename_to_size_list_dict = {}  # filename -> url -> list of sizes  (bytes)

    for result_file in result_files:
        # NOTE: cPickle.load on untrusted files can execute arbitrary code;
        # these files are expected to be produced locally by analyze_traces.
        with open(result_file, 'r') as f:
            url_to_plts, url_to_sizes = cPickle.load(f)
        # (removed the no-op 'f.closed' expression; 'with' already closes f)

        filename_to_plt_list_dict[result_file] = url_to_plts
        filename_to_size_list_dict[result_file] = url_to_sizes


    # collapse stats for each URL into mean and median
    filename_to_plt_mean_dict = defaultdict(dict)  #filename -> url -> mean PLT
    filename_to_plt_median_dict = defaultdict(dict)  #filename -> url -> median PLT
    for filename, plt_list_dict in filename_to_plt_list_dict.iteritems():
        for url, plt_list in plt_list_dict.iteritems():
            filename_to_plt_mean_dict[filename][url] = numpy.mean(plt_list)
            filename_to_plt_median_dict[filename][url] = numpy.median(plt_list)

    filename_to_size_mean_dict = defaultdict(dict)  # filename -> url -> mean size
    filename_to_size_median_dict = defaultdict(dict)  # filename -> url -> median size
    for filename, size_list_dict in filename_to_size_list_dict.iteritems():
        for url, size_list in size_list_dict.iteritems():
            filename_to_size_mean_dict[filename][url] = numpy.mean(size_list) / 1000000.0  # bytes -> MB
            filename_to_size_median_dict[filename][url] = numpy.median(size_list) / 1000000.0  # bytes -> MB


    # plot CDFs; iterate result_files so series order matches the input order
    mean_plts = []
    median_plts = []
    mean_sizes = []
    median_sizes = []
    labels = []
    for filename in result_files:
        name = os.path.splitext(os.path.split(filename)[1])[0]
        # prettify series labels (str.replace instead of the deprecated
        # string.replace module functions)
        labels.append(name.replace('SPDY', 'Compression Proxy').replace('NoProxy', 'No Proxy'))
        mean_plts.append(filename_to_plt_mean_dict[filename].values())
        median_plts.append(filename_to_plt_median_dict[filename].values())
        mean_sizes.append(filename_to_size_mean_dict[filename].values())
        median_sizes.append(filename_to_size_median_dict[filename].values())

    # mean PLTs
    myplot.cdf(mean_plts, height_scale=0.7,
        xlabel='Mean Page Load Time (seconds)', labels=labels,
        filename=os.path.join(args.outdir, 'mean_plt.pdf'))

    myplot.cdf(median_plts, height_scale=0.7,
        xlabel='Median Page Load Time (seconds)', labels=labels,
        filename=os.path.join(args.outdir, 'median_plt.pdf'))

    myplot.cdf(mean_sizes, height_scale=0.7,
        xlabel='Mean Total Data Exchanged (MB)', labels=labels,
        xlim=(0, 5),
        filename=os.path.join(args.outdir, 'mean_size.pdf'))

    myplot.cdf(median_sizes, height_scale=0.7,
        xlabel='Median Total Data Exchanged (MB)', labels=labels,
        xlim=(0, 5),
        filename=os.path.join(args.outdir, 'median_size.pdf'))


    # combined plot: interleave mean/median size series per input file
    combined_sizes = []
    combined_labels = []
    for i in range(len(result_files)):
        combined_sizes.append(mean_sizes[i])
        combined_labels.append('%s (Mean)' % labels[i])
        combined_sizes.append(median_sizes[i])
        combined_labels.append('%s (Median)' % labels[i])

    myplot.cdf(combined_sizes, height_scale=0.7,
        xlabel='Total Data Exchanged [MB]', labels=combined_labels,
        xlim=(0, 5), labelspacing=0.1, handletextpad=0.4,
        filename=os.path.join(args.outdir, 'size.pdf'))
Пример #9
0
def plot_browser(machine, remote, result_files):
    """Plot browser-benchmark CDFs for one machine/remote pairing.

    result_files: dict mapping '<protocol>_<slicing>' -> path to a whitespace
        3-column data file (column 2 holds the measured values).
    Produces two PDFs via outfile()/myplot.cdf: a protocol comparison (all
    protocols at 4 slices) and a slicing-strategy comparison (SPP variants).
    Reads module-level args, PROTOCOLS, DATA_TRANSFORMS, LEGEND_STRINGS,
    X_AXIS, MANUAL_ARGS.
    """

    ##
    ## plot 1: compare 4-slice version of all protocols
    ##
    out_filename, out_filepath = outfile(args.opt, remote, machine,\
        extra_tag='_proto-comparison')

    # need to make alternate dict of result paths
    temp_result_files = {}
    if 'spp_four-slices' in result_files:
        temp_result_files['spp'] = result_files['spp_four-slices']
    if 'spp_mod_four-slices' in result_files:
        temp_result_files['spp_mod'] = result_files['spp_mod_four-slices']
    if 'ssl_four-slices' in result_files:
        temp_result_files['ssl'] = result_files['ssl_four-slices']
    if 'ssl_mod_four-slices' in result_files:
        temp_result_files['ssl_mod'] = result_files['ssl_mod_four-slices']
    if 'fwd_four-slices' in result_files:
        temp_result_files['fwd'] = result_files['fwd_four-slices']
    if 'fwd_mod_four-slices' in result_files:
        temp_result_files['fwd_mod'] = result_files['fwd_mod_four-slices']
    if 'pln_four-slices' in result_files:
        temp_result_files['pln'] = result_files['pln_four-slices']
    if 'pln_mod_four-slices' in result_files:
        temp_result_files['pln_mod'] = result_files['pln_mod_four-slices']

    ys = []  # holds arrays of y values, 1 per series
    labels = []
    plot_title = ''

    for protocol in PROTOCOLS[args.opt]:
        if protocol not in temp_result_files: continue
        filepath = temp_result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        # skip files that aren't a non-empty 2-D, 3-column matrix
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] != 3:
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])

        print protocol, max(transform(data[:, 2]))
        ys.append(transform(data[:, 2]))
        if protocol == 'spp':
            labels.append(LEGEND_STRINGS[protocol] + ' (4 Ctx)')
        elif protocol == 'spp_mod':
            labels.append('mcTLS (4 Ctx, Nagle Off)')
        else:
            labels.append(LEGEND_STRINGS[protocol])
        # NOTE(review): plot_title is computed but the title= argument below
        # is commented out, so it is currently unused — confirm intent.
        plot_title = title(args.opt, remote, data)

    print '[OUT]', out_filepath
    myplot.cdf(ys, labels=labels, xlabel=X_AXIS[args.opt],\
        #title=plot_title,\
        #builds = [[], [3], [3, 1, 2], [3, 1, 2, 0], [3, 1, 2, 0, 4]],\
        filename=out_filepath, **MANUAL_ARGS[out_filename])

    ##
    ## plot 2: compare slice strategies (SPP only)
    ##
    out_filename, out_filepath = outfile(args.opt, remote, machine,\
        extra_tag='_slicing-comparison')

    ys = []  # holds arrays of y values, 1 per series
    labels = []
    plot_title = ''

    # legend text per SPP slicing variant
    SLICE_LEGEND = {
        'spp_one-slice': 'mcTLS (1 Ctx)',
        'spp_mod_one-slice': 'mcTLS (1 Ctx, Nagle Off)',
        'spp_four-slices': 'mcTLS (4 Ctx)',
        'spp_mod_four-slices': 'mcTLS (4 Ctx, Nagle Off)',
        'spp_slice-per-header': 'mcTLS (Ctx per Hdr)',
        'spp_mod_slice-per-header': 'mcTLS (Ctx per Hdr, Nagle Off)',
    }

    for protocol in ('spp_one-slice', 'spp_mod_one-slice', 'spp_four-slices',
                     'spp_mod_four-slices', 'spp_slice-per-header',
                     'spp_mod_slice-per-header'):
        if protocol not in result_files: continue
        filepath = result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        # same shape sanity check as above
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] != 3:
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])

        ys.append(transform(data[:, 2]))
        labels.append(SLICE_LEGEND[protocol])
        plot_title = title(args.opt, remote, data)

    print '[OUT]', out_filepath
    myplot.cdf(ys, labels=labels, xlabel=X_AXIS[args.opt],\
        #title=plot_title,\
        filename=out_filepath, **MANUAL_ARGS[out_filename])
Пример #10
0
def plot_browser(machine, remote, result_files):
    """Plot browser-benchmark CDFs for one machine/remote pairing.

    result_files: dict mapping '<protocol>_<slicing>' -> path to a whitespace
        3-column data file (column 2 holds the measured values).
    Produces two PDFs via outfile()/myplot.cdf: a protocol comparison (all
    protocols at 4 slices) and a slicing-strategy comparison (SPP variants).
    Reads module-level args, PROTOCOLS, DATA_TRANSFORMS, LEGEND_STRINGS,
    X_AXIS, MANUAL_ARGS.
    """

    ##
    ## plot 1: compare 4-slice version of all protocols
    ##
    out_filename, out_filepath = outfile(args.opt, remote, machine,\
        extra_tag='_proto-comparison')

    # need to make alternate dict of result paths
    temp_result_files = {}
    if 'spp_four-slices' in result_files:
        temp_result_files['spp'] = result_files['spp_four-slices']
    if 'spp_mod_four-slices' in result_files:
        temp_result_files['spp_mod'] = result_files['spp_mod_four-slices']
    if 'ssl_four-slices' in result_files:
        temp_result_files['ssl'] = result_files['ssl_four-slices']
    if 'ssl_mod_four-slices' in result_files:
        temp_result_files['ssl_mod'] = result_files['ssl_mod_four-slices']
    if 'fwd_four-slices' in result_files:
        temp_result_files['fwd'] = result_files['fwd_four-slices']
    if 'fwd_mod_four-slices' in result_files:
        temp_result_files['fwd_mod'] = result_files['fwd_mod_four-slices']
    if 'pln_four-slices' in result_files:
        temp_result_files['pln'] = result_files['pln_four-slices']
    if 'pln_mod_four-slices' in result_files:
        temp_result_files['pln_mod'] = result_files['pln_mod_four-slices']

    ys = []  # holds arrays of y values, 1 per series
    labels = []
    plot_title = ''

    for protocol in PROTOCOLS[args.opt]:
        if protocol not in temp_result_files: continue
        filepath = temp_result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        # skip files that aren't a non-empty 2-D, 3-column matrix
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] != 3:
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])

        print protocol, max(transform(data[:,2]))
        ys.append(transform(data[:,2]))
        if protocol == 'spp':
            labels.append(LEGEND_STRINGS[protocol] + ' (4 Ctx)')
        elif protocol == 'spp_mod':
            labels.append('mcTLS (4 Ctx, Nagle Off)')
        else:
            labels.append(LEGEND_STRINGS[protocol])
        # NOTE(review): plot_title is computed but the title= argument below
        # is commented out, so it is currently unused — confirm intent.
        plot_title = title(args.opt, remote, data)

    print '[OUT]', out_filepath
    myplot.cdf(ys, labels=labels, xlabel=X_AXIS[args.opt],\
        #title=plot_title,\
        #builds = [[], [3], [3, 1, 2], [3, 1, 2, 0], [3, 1, 2, 0, 4]],\
        filename=out_filepath, **MANUAL_ARGS[out_filename])


    ##
    ## plot 2: compare slice strategies (SPP only)
    ##
    out_filename, out_filepath = outfile(args.opt, remote, machine,\
        extra_tag='_slicing-comparison')

    ys = []  # holds arrays of y values, 1 per series
    labels = []
    plot_title = ''

    # legend text per SPP slicing variant
    SLICE_LEGEND = {
        'spp_one-slice': 'mcTLS (1 Ctx)',
        'spp_mod_one-slice': 'mcTLS (1 Ctx, Nagle Off)',
        'spp_four-slices': 'mcTLS (4 Ctx)',
        'spp_mod_four-slices': 'mcTLS (4 Ctx, Nagle Off)',
        'spp_slice-per-header': 'mcTLS (Ctx per Hdr)',
        'spp_mod_slice-per-header': 'mcTLS (Ctx per Hdr, Nagle Off)',
    }

    for protocol in ('spp_one-slice', 'spp_mod_one-slice', 'spp_four-slices', 'spp_mod_four-slices', 'spp_slice-per-header', 'spp_mod_slice-per-header'):
        if protocol not in result_files: continue
        filepath = result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        # same shape sanity check as above
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] != 3:
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])

        ys.append(transform(data[:,2]))
        labels.append(SLICE_LEGEND[protocol])
        plot_title = title(args.opt, remote, data)

    print '[OUT]', out_filepath
    myplot.cdf(ys, labels=labels, xlabel=X_AXIS[args.opt],\
        #title=plot_title,\
        filename=out_filepath, **MANUAL_ARGS[out_filename])