Code Example #1
 def run(self,
         fig_num,
         is_noise=True,
         noise_level=10**(-2),
         output_csv=OUTPUT_CSV):
     """
     run simulation and save result
     :return:
     """
     tout, self.y, x = signal.lsim2(self.system, self.u, self.t)
     noise = np.zeros(len(self.t))
     if is_noise:
         noise = np.random.rand(len(self.t)) * noise_level * max(self.y)
     self.y += noise
     mycsv.save(self.t,
                self.u,
                self.y,
                save_name=self.path + output_csv,
                header=("t", "u", "y"))
     myplot.plot(fig_num, self.t, self.u)
     myplot.plot(fig_num, self.t, self.y)
     myplot.save(fig_num,
                 label=("time [s]", "u/y []"),
                 save_name=self.path + "Simulation_Result",
                 leg=("u", "y"))
Code Example #2
    def save_plots(self, outdir, tag=None):
        # CDFs
        for cdf in AwazzaLogAnalyzer.CDFS:
            myplot.cdf(cdf['data'](self), numbins=cdf['numbins'],\
                xlabel=cdf['xlabel'],\
                labels=cdf['labels'],\
                filename=os.path.join(outdir, '%s_%s.pdf' % (tag, cdf['filename'])),\
                **cdf['kwargs'])

        # Data usage bar chart
        bar_labels = ('From Origin', 'To Client', 'Cached', 'Compressed', 'Closed Early')
        bar_values = (self._request_ints['kb-from-origin'] / 1000000.0,\
                       self._request_ints['kb-to-client'] / 1000000.0,\
                       self._request_ints['kb-cached'] / 1000000.0,\
                       self._request_ints['kb-compressed'] / 1000000.0,\
                       self._request_ints['kb-lost-client-closed'] / 1000000.0)
        myplot.plot([bar_labels], [bar_values], type='bar', label_bars=True,\
            ylabel='Data (GB)', bar_padding=0.5, barwidth=0.5,\
            filename=os.path.join(outdir, '%s_%s.pdf' % (tag, 'data_usage')))
Code Example #3
def plot(msg):
    helboard = telebot.types.ReplyKeyboardMarkup(True, True)
    helpbutton = telebot.types.KeyboardButton('/plot 5:1 4:2 3:3 2:4')
    helboard.add(helpbutton)
    text = msg.text
    if len(re.findall(r'\s*\d+\s*:\s*\d+\s*', text)) == 0:
        send_mess = 'To use the /plot command, list space-separated pairs of numbers in the Yi:Xi format;' \
                    ' an example is provided on the keyboard'
        bot.send_message(msg.chat.id, send_mess, reply_markup=helboard)
    else:
        patY = r'\s*(\d+)\s*:'
        patX = r':\s*(\d+)\s*'
        try:
            Y = list(map(float, re.findall(patY, text)))
            X = list(map(float, re.findall(patX, text)))
            myplot.plot(X, Y, user)
            img = open('tmp.png', 'rb')
            bot.send_photo(msg.chat.id, img)
            img.close()
        except:
            bot.send_message(msg.chat.id,
                             "Что-то не так! Провертье формат записи Y:X",
                             reply_markup=helboard)
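
A minimal sketch of just the pair-parsing logic used above, with no Telegram dependency; the sample message text is hypothetical.

import re

text = '/plot 5:1 4:2 3:3 2:4'
patY = r'\s*(\d+)\s*:'
patX = r':\s*(\d+)\s*'
Y = list(map(float, re.findall(patY, text)))  # numbers before each ':'
X = list(map(float, re.findall(patX, text)))  # numbers after each ':'
print(X, Y)  # [1.0, 2.0, 3.0, 4.0] [5.0, 4.0, 3.0, 2.0]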
Code Example #4
# coding the matrix
# task 2.5.4

from myplot import plot

def scalar_vector_mult(alpha, v):
    return [alpha*v[i] for i in range(len(v))]

L = [[2, 2], [3, 2], [1.75, 1], [2, 1], [2.25, 1], [2.5, 1], [2.75, 1], [3, 1], [3.25, 1]]

L2 = [scalar_vector_mult(0.5, v) for v in L]
L3 = [scalar_vector_mult(-0.5, v) for v in L2]

L2.extend(L3)

plot(L2, size=4, file='task2.5.4.4.png')
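
A quick usage check of scalar_vector_mult from the example above (pure Python; the sample vectors are made up):

print(scalar_vector_mult(0.5, [2, 2]))   # [1.0, 1.0]
print(scalar_vector_mult(2, [1.75, 1]))  # [3.5, 2]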

Code Example #5
def plot_results(filename_to_results, filenames=None):
    # use the filenames list to make sure we process files in order
    # (so we can control the order of the series on the plot)
    if not filenames: filenames = filename_to_results.keys()

    filename_to_data = defaultdict(lambda: defaultdict(list))
    fraction_data = []
    fraction_labels = []
    absolute_data = []
    absolute_labels = []
    mean_percents_by_size = []
    mean_absolutes_by_size = []
    mean_by_size_xs = []
    mean_by_size_ys = []
    mean_by_size_yerrs = []
    mean_by_size_labels = []

    for filename in filenames:
        results = filename_to_results[filename]
        for r in results:
            if r.status == SUCCESS:
                filename_to_data[filename]['both_success'].append(r.url)

                filename_to_data[filename]['mean_percent_inflations'].append(
                    r.https_mean / r.http_mean)
                filename_to_data[filename]['mean_absolute_inflations'].append(
                    r.https_mean - r.http_mean)
                filename_to_data[filename]['median_percent_inflations'].append(
                    r.https_median / r.http_median)
                filename_to_data[filename][
                    'median_absolute_inflations'].append(r.https_median -
                                                         r.http_median)
                if r.size:
                    filename_to_data[filename]['mean_percent_by_size'].append(
                        (r.size / 1000.0, r.https_mean / r.http_mean,
                         r.http_stddev))
                    filename_to_data[filename]['mean_absolute_by_size'].append(
                        (r.size / 1000.0, r.https_mean - r.http_mean,
                         r.http_stddev))
                    filename_to_data[filename]['mean_http_by_size'].append(
                        (r.size / 1000.0, r.http_mean, r.http_stddev))
                    filename_to_data[filename]['mean_https_by_size'].append(
                        (r.size / 1000.0, r.https_mean, r.http_stddev))
            elif r.status == FAILURE_NO_HTTP:
                filename_to_data[filename]['no_http'].append(r.url)
            elif r.status == FAILURE_NO_HTTPS:
                filename_to_data[filename]['no_https'].append(r.url)
            else:
                filename_to_data[filename]['other_error'].append(r.url)

        print '%i sites were accessible over both protocols' %\
            len(filename_to_data[filename]['both_success'])
        print '%i sites were not accessible over HTTP' %\
            len(filename_to_data[filename]['no_http'])
        print '%i sites were not accessible over HTTPS' %\
            len(filename_to_data[filename]['no_https'])
        print '%i sites were not accessible for other reasons' %\
            len(filename_to_data[filename]['other_error'])

        if 'pit' in filename:
            location = 'PIT'
        elif '3g' in filename:
            location = '3G'
        else:
            location = 'Fiber'

        fraction_data.append(
            filename_to_data[filename]['mean_percent_inflations'])
        fraction_labels.append('Mean (%s)' % location)
        fraction_data.append(
            filename_to_data[filename]['median_percent_inflations'])
        fraction_labels.append('Median (%s)' % location)

        absolute_data.append(
            numpy.array(filename_to_data[filename]
                        ['mean_absolute_inflations']))  # * 1000)  # s -> ms
        absolute_labels.append('Mean (%s)' % location)
        absolute_data.append(
            numpy.array(filename_to_data[filename]
                        ['median_absolute_inflations']))  # * 1000)  # s -> ms
        absolute_labels.append('Median (%s)' % location)

        try:
            mean_by_size_xs.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [0])
            mean_by_size_ys.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [1])
            mean_by_size_yerrs.append(
                zip(*sorted(filename_to_data[filename]['mean_http_by_size']))
                [2])
            mean_by_size_labels.append('Mean HTTP (%s)' % location)
            mean_by_size_xs.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [0])
            mean_by_size_ys.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [1])
            mean_by_size_yerrs.append(
                zip(*sorted(filename_to_data[filename]['mean_https_by_size']))
                [2])
            mean_by_size_labels.append('Mean HTTPS (%s)' % location)
        except Exception as e:
            logging.warn('Error processing size data: %s' % e)

        if location == 'BCN':
            mean_percents_by_size.append(
                filename_to_data[filename]['mean_percent_by_size'])
            mean_absolutes_by_size.append(
                filename_to_data[filename]['mean_absolute_by_size'])

    myplot.cdf(fraction_data,
               xlabel='Load Time Ratio (HTTPS/HTTP)',
               labels=fraction_labels,
               filename=os.path.join(args.outdir,
                                     '%s_fraction_inflation.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xlim=(1, 3),
               legend='lower right')

    myplot.cdf(absolute_data,
               xlabel='Load Time Difference (HTTPS-HTTP) [s]',
               labels=absolute_labels,
               filename=os.path.join(args.outdir,
                                     '%s_absolute_inflation.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xlim=(0, 3),
               legend='lower right')

    myplot.cdf(absolute_data,
               xlabel='Load Time Difference (HTTPS-HTTP) [s]',
               labels=absolute_labels,
               filename=os.path.join(
                   args.outdir, '%s_absolute_inflation_log.pdf' % args.tag),
               height_scale=0.7,
               numbins=10000,
               xscale='log',
               xlim=(0, 10),
               legend='lower right')

    # Plot fraction and absolute in same figure as subplots
    fig, ax_array = myplot.subplots(1, 2, height_scale=0.75, width_scale=1.2)
    myplot.cdf(fraction_data,
               fig=fig,
               ax=ax_array[0],
               xlabel='Load Time Ratio\n(HTTPS/HTTP)',
               labels=fraction_labels,
               numbins=10000,
               xlim=(1, 3),
               show_legend=False)

    lines, labels = myplot.cdf(absolute_data,
                               fig=fig,
                               ax=ax_array[1],
                               xlabel='Load Time Difference\n(HTTPS-HTTP) [s]',
                               labels=absolute_labels,
                               numbins=10000,
                               xlim=(0, 3),
                               legend='lower right',
                               labelspacing=0.1,
                               handletextpad=0.4)

    # shrink plots to make room for legend underneath
    #for ax in ax_array:
    #    box = ax.get_position()
    #    ax.set_position([box.x0, box.y0 + box.height * 0.25,
    #             box.width, box.height * 0.75])

    # shrink plots to make room for title above
    for ax in ax_array:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width, box.height * 0.95])

    #myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation_no_legend.pdf' % args.tag))
    #fig.legend(lines, labels, loc='lower center', ncol=2, prop={'size':20}, frameon=False,
    #    bbox_to_anchor=(.5, -.03))
    fig.suptitle('O-Proxy Top 2000 Objects')
    myplot.save_plot(
        os.path.join(args.outdir, '%s_combined_inflation.pdf' % args.tag))

    try:
        myplot.plot([zip(*mean_percents_by_size[0])[0]],
                    [zip(*mean_percents_by_size[0])[1]],
                    xlabel='Object Size (KB)',
                    ylabel='Fraction Inflation (HTTPS/HTTP)',
                    linestyles=[''],
                    xscale='log',
                    filename=os.path.join(
                        args.outdir, '%s_fraction_by_size.pdf' % args.tag))

        myplot.plot([zip(*mean_absolutes_by_size[0])[0]],
                    [zip(*mean_absolutes_by_size[0])[1]],
                    xlabel='Object Size (KB)',
                    ylabel='Absolute Inflation (HTTPS-HTTP) [sec]',
                    linestyles=[''],
                    xscale='log',
                    filename=os.path.join(
                        args.outdir, '%s_absolute_by_size.pdf' % args.tag))

        myplot.plot(
            mean_by_size_xs,
            mean_by_size_ys,
            yerrs=mean_by_size_yerrs,
            xlabel='Object Size (KB)',
            ylabel='Load Time [sec]',
            xscale='log',
            marker=None,
            labels=mean_by_size_labels,  # legend='lower left',
            legend_cols=2,
            width_scale=2,
            filename=os.path.join(args.outdir,
                                  '%s_mean_lt_by_size.pdf' % args.tag))
    except Exception as e:
        logging.warn('Error processing size data: %s', e)
Code Example #6
# coding the matrix
# task 2.4.3
from myplot import plot


def add2(v, w):
    return [v[0] + w[0], v[1] + w[1]]


L = [[2, 2], [3, 2], [1.75, 1], [2, 1], [2.25, 1], [2.5, 1], [2.75, 1],
     [3, 1], [3.25, 1]]

plot([add2(v, [1, 2]) for v in L], size=4, file='task2.4.3.png')
Code Example #7
# #     sampleY.append(y[int(math.pow(10, i))]);
#
# sampleY = lengthOfLog;
#
# # fit a normal distribution
# params = ss.norm.fit(sampleY);
# norm = ss.norm.pdf(sampleX, loc=params[0], scale=params[1]);
# print (params);
# sum = 0;
# for i in range(0, len(norm)):
#     norm[i] /= 100;
#     sum += norm[i];
# print sum;

#使用累积值
ysum = y
for i in range(1, np.power(10, powerRank)):
    ysum[i] = ysum[i] + ysum[i-1]

# 画图
# myplot.plot(x, y, label='count', xlabel='Length', ylabel='Views', xAxieIsLog=True, yAxieIsLog=False);
print("plotting...");
# fig2_1
# myplot.plot(x, y, xlabel='Length', ylabel='Count', label='Source data', linewidth=1, xAxieIsLog=True, deleteZero=False);

# fig2_2
myplot.plot(x, ysum, xlabel='Length', ylabel='Aggregation of videos', label='', linewidth=1, xAxieIsLog=True, deleteZero=False);
# plt.plot(sampleX, norm, label='Normal distribution', linewidth=1, color='red');

# plt.plot(sampleX, sampleY, color='green', label='count', linewidth=1);
# plt.ylabel('Count for length >= x');
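
The in-place loop above simply builds a running total; as a sketch, the same cumulative series can be computed directly with numpy.cumsum (standalone example with made-up data):

import numpy as np

y = [3, 1, 4, 1, 5]
ysum = np.cumsum(y)
print(ysum)  # [ 3  4  8  9 14]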
Code Example #8
File: main.py Project: youssefaly97/Graph-Plotter
import sys
import os

import parse_function as pf
import numpy as np
import myplot as mp
#import matplotlib as plt

error = ["File is empty", "Error: File not found", "No data files found"]

selfname = sys.argv[0].split('/')[-1]

try:
    filelist.clear()
    x.clear()
except:
    i = 0

for i in range(0, len(os.listdir())):
    if ("." in os.listdir()[i] and ".txt" in os.listdir()[i]):
        try:
            filelist.append(os.listdir()[i])
        except NameError:
            filelist = ([os.listdir()[i]])

#for i in range(0,len(filelist)):
#x = np.append(x,[pf.parse(filelist[i])],2)

x = pf.parse(filelist[1])
if (type(x) == int):
    print(error[x - 1])

mp.plot(x[0][:, 0], x[0][:, 1], x[0][:, 1], 0, x[0, 0, 3], x[0, 0, 4])
Code Example #9
#emu = np.append(emu,np.zeros((1,5)),0)
x86 = np.delete(x86, -1, 0)

if (type(emu) == int):
    print(error[emu - 1])

#mp.plot(np.array((emu[0][:,0]),dtype=int),emu[0][:,1],x86[0][:,1],0,int(x[0,0,3]),int(x[0,0,4]))

#mp.plot(np.array((emu[sp][:,0]),dtype=int), #thread count axis
#        emu[sp][:,op+1],                    #EMU clock cycles
#        x86[sp][:,op+1],                    #x86 clock cycles
#        op,                                 #operation
#        int(emu[sp,0,3]),                   #sparsity
#        int(emu[sp,0,4]),                   #matrix size
#        ps)

plt = mp.plot(
    np.array((x86[:, 0]), dtype=int),  #X data
    np.array(
        [emu[:, 2], x86[:, 2], -2.8 * x86[:, 2], 1.6 * x86[:, 2],
         x86[:, 2]]),  #Y data
    [0, 0, 1, 0, 0],  #Plot Type
    ["blue", "red", "purple", "green", "orange"],  #Colors
    [
        "Threads", "Clock Cycles", "EMU vs AMD", "EMU", "x86", "x88", "x89",
        "x80"
    ],
    [1, 0, 1, 1, 0])  #Labels

plt.savefig('test.svg', transparent=True)
Code Example #10
def plotall(fig, fbc, ndata=NDATA, datapath=DATA):
    """

    :param fig:
    :return:
    """
    fbc.split(ndata)
    olist = fbc.olist
    o = olist[0]
    F = len(o)
    Llist = fbc.Llist
    gcf = fbc.calc_gcf()
    print("Gain Crossover Frequencies (Hz):", gcf)
    print("\tMin. (Hz):", min(gcf))
    print("\tAve. (Hz):", np.mean(gcf))
    print("\tMax. (Hz):", max(gcf))
    f_gc = min(gcf)

    ##########Plot1##########
    fig += 1
    for L in Llist:
        # Nyquist
        myplot.plot(fig, np.real(L), np.imag(L), lw=6, line_style="-")

    _theta = np.linspace(0, 2 * np.pi, 100)
    myplot.plot(fig, (np.cos(_theta)), (np.sin(_theta)),
                line_style="r--",
                lw=1)  # r=1
    myplot.plot(fig, (0, ), (0, ), line_style="r+")  # origin
    myplot.plot(fig, (-1, ), (0, ), line_style="r+")  # critical
    # Stability Constraint
    myplot.plot(fig, (fbc.rm * np.cos(_theta)) + fbc.sigma,
                fbc.rm * np.sin(_theta),
                line_style="y:")  # r=1
    myplot.plot(
        fig, (-1 / fbc.g_dgm, -np.cos(fbc.theta_dpm), -np.cos(fbc.theta_dpm2)),
        (0, -np.sin(fbc.theta_dpm), np.sin(fbc.theta_dpm2)),
        line_style="yo")
    myplot.save(fig,
                xl=[-1, 1],
                yl=[-1, 1],
                leg=(*["Nyquist" + str(k) for k in range(ndata)], "r=1",
                     "Origin", "(-1,j0)", "Stb. Cond.", "Margins"),
                label=("Re", "Im"),
                save_name=datapath + "/" + str(fig) + "_nyquist_enlarged",
                title="Optimized Gain-Crossover Frequency (Hz): " + str(f_gc))

    myplot.save(fig,
                xl=[-3, 3],
                yl=[-3, 3],
                leg=(*["Nyquist" + str(k) for k in range(ndata)], "r=1",
                     "Origin", "(-1,j0)", "Stb. Cond.", "Margins"),
                label=("Re", "Im"),
                save_name=datapath + "/" + str(fig) + "_nyquist",
                title="Optimized Gain-Crossover Frequency (Hz): " + str(f_gc))

    ##########Plot2##########
    fig += 1
    # L(s)
    for L in Llist:
        myplot.bodeplot(fig,
                        o[:F] / 2 / np.pi,
                        20 * np.log10(abs(L)),
                        np.angle(L, deg=True),
                        line_style='-')
    myplot.save(fig,
                save_name=datapath + "/" + str(fig) + "_bode",
                title="Openloop L(s)",
                leg=["L" + str(k) for k in range(ndata)])

    ##########Plot3##########
    fig += 1
    try:
        c = fbc.freqresp()[:F]
    except:
        c = fbc.C[:F]
    myplot.bodeplot(fig,
                    o[:F] / 2 / np.pi,
                    20 * np.log10(abs(c)),
                    np.angle(c, deg=True),
                    line_style='-')
    g = fbc.g[:F]
    myplot.bodeplot(fig,
                    o[:F] / 2 / np.pi,
                    20 * np.log10(abs(g)),
                    np.angle(g, deg=True),
                    line_style='-')
    myplot.save(fig,
                title='C(s)',
                leg=("Controller", "Plant"),
                save_name=datapath + "/Controller")

    ##########Plot4##########
    fig += 1
    m = fbc.nominal_sensitivity / (-20)
    x = (fbc.o_dgc / fbc.o[:F])**m
    myplot.plot(fig,
                o[:F] / 2 / np.pi,
                -20 * np.log10(abs(x)),
                line_style='k--',
                plotfunc=plt.semilogx)
    for L in Llist:
        T = 1 / (1 + L)
        S = 1 - T
        myplot.plot(fig,
                    o[:F] / 2 / np.pi,
                    20 * np.log10(abs(T)),
                    line_style='b-',
                    plotfunc=plt.semilogx)
        myplot.plot(fig,
                    o[:F] / 2 / np.pi,
                    20 * np.log10(abs(S)),
                    line_style='r-',
                    plotfunc=plt.semilogx)
    myplot.save(fig,
                save_name=datapath + "/" + str(fig) + "_ST",
                title="S(s) (Blue) and T(s) (Red).",
                leg=("Nominal:" + str(fbc.nominal_sensitivity) + " dB/dec", ),
                yl=(-80, 10))

    ##########Plot5##########
    fig += 1
    try:
        c = fbc.freqresp(obj="pid")[:F]
        myplot.bodeplot(fig,
                        o[:F] / 2 / np.pi,
                        20 * np.log10(abs(c)),
                        np.angle(c, deg=True),
                        line_style='-')
    except:
        pass
    try:
        c = fbc.freqresp(obj="fir")[:F]
        myplot.bodeplot(fig,
                        o[:F] / 2 / np.pi,
                        20 * np.log10(abs(c)),
                        np.angle(c, deg=True),
                        line_style='-')
    except:
        pass
    myplot.save(fig, title='Controllers', leg=("PID", "FIR"))

    return fig
Code Example #11
for i in x:
    z[i] = y[i] * i

# compute cumulative values
for i in range(1, np.power(10, powerRank)):
    z[i] = z[i] + z[i - 1]

# drop points where y = 0 to speed up plotting
xx = []
yy = []
for i in x:
    if y[i] != 0:
        xx.append(x[i])
        yy.append(y[i])

print("data size: " + len(xx).__str__())
# 画图

#fig1_1
myplot.plot(xx,
            yy,
            label='',
            xlabel='Views',
            ylabel='Number of videos',
            xAxieIsLog=True,
            yAxieIsLog=False)

#fig1_2
# myplot.plot(x, y, label='', xlabel='Views', ylabel='Aggregation of views where views <= x', xAxieIsLog=True, yAxieIsLog=False);

# myplot.scatter(x, z, label='count', xlabel='Views', ylabel='Number of video', xAxieIsLog=True, yAxieIsLog=False);
Code Example #12
#(8,10) 512
#(7,9) 1024
emu = pf.parse(filelist[3])
x86 = pf.parse(filelist[1])

#if emu[sp,0,4] != x86[sp,0,4]:
#    print("Matrix Size Mismatch")

#x86 = np.append(np.zeros((2,1,5)),x86,1)
#x86 = np.append(x86,np.zeros((2,6,5)),1)

if (type(emu) == int):
    print(error[emu-1])

#mp.plot(np.array((emu[0][:,0]),dtype=int),emu[0][:,1],x86[0][:,1],0,int(x[0,0,3]),int(x[0,0,4]))

#mp.plot(np.array((emu[sp][:,0]),dtype=int), #thread count axis
#        emu[sp][:,op+1],                    #EMU clock cycles
#        x86[sp][:,op+1],                    #x86 clock cycles
#        op,                                 #operation
#        int(emu[sp,0,3]),                   #sparsity
#        int(emu[sp,0,4]),                   #matrix size
#        ps)
    
mp.plot(np.array((emu[:,0]),dtype=int), #thread count axis
        emu[:,2],                    #EMU clock cycles
        x86[:,2],                    #x86 clock cycles
        op,                                 #operation
        int(emu[0,3]),                   #sparsity
        int(emu[0,4]),                   #matrix size
        ps)
Code Example #13
# coding the matrix
# task 2.6.9
from myplot import plot

period = 100

pt1 = [3.5, 3]
pt2 = [0.5, 1]

s = []

for a in range(period):
    alpha = (1.0 / period) * a
    beta = 1.0 - alpha
    x = pt1[0] * alpha + pt2[0] * beta
    y = pt1[1] * alpha + pt2[1] * beta
    s.append((x, y))

plot(s, file='task2.6.9.png')
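
The loop above traces the segment between pt2 and pt1 as convex combinations alpha*pt1 + (1 - alpha)*pt2 for alpha in [0, 1). A NumPy sketch of the same construction (NumPy is an assumption here; the original uses plain lists):

import numpy as np

pt1, pt2 = np.array([3.5, 3]), np.array([0.5, 1])
alphas = np.arange(100) / 100.0
s = [tuple(a * pt1 + (1 - a) * pt2) for a in alphas]
print(s[0], s[-1])  # starts at (0.5, 1.0), ends just short of (3.5, 3.0)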
Code Example #14
# coding the matrix
# task 2.4.2
from myplot import plot

L = [[2, 2], [3, 2], [1.75, 1], [2, 1], [2.25, 1], [2.5, 1], [2.75, 1],
     [3, 1], [3.25, 1]]

plot(L, size=4, file='task2.4.2.png')
Code Example #15
File: proj4.py Project: luyh7/graduation-project
# use cumulative average values
avrg_sumY = list(avrgY)
myplot.normalize(avrg_sumY)
for i in range(1, np.power(10, powerRank)):
    avrg_sumY[i] = avrg_sumY[i] + avrg_sumY[i - 1]

x = range(0, np.power(10, powerRank))

y = list(viewTimes)
print(y[0])
print(y[1])

#fig4_1
# myplot.plot(x, y, label='', xlabel='Ratings', ylabel='Views', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank);

#fig4_2
# myplot.plot(x, sumy, label='', xlabel='Ratings', ylabel='Aggregation of views with <= x ratings', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank);

#fig4_3
# myplot.plot(x, avrgY, label='', xlabel='Ratings', ylabel='Average views', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank);

#fig4_4
myplot.plot(x,
            avrg_sumY,
            label='',
            xlabel='Ratings',
            ylabel='Aggregation of average views',
            xAxieIsLog=True,
            yAxieIsLog=False,
            powerRank=powerRank)
Code Example #16
countOfstars = np.zeros(11);
index = 0;
for line in dataset:
    stars = line['stars'];

    # reduce noise
    # if lengthOfInt >= np.power(10, powerRank) - 1:
    #     continue;
    countOfstars[stars / 5] += 1;
    viewTimes[stars / 5] += int(line['views']);
    if index % (len(dataset) / 10) == 0:
        print("running... " + str(index / (len(dataset) / 10)) + "0%");
    index += 1;

# use cumulative values
# for i in reversed(range(0,np.power(10, powerRank) - 1)):
#     viewTimes[i] = viewTimes[i] + viewTimes[i+1];

# take the average
avrgY = list(viewTimes);
for i in range(0, 11):
    if(not(countOfstars[i] == 0)):
        avrgY[i] /= countOfstars[i];

x = range(0, 11);

y = list(viewTimes);
myplot.plot(x, y, label='count', xlabel='Stars', ylabel='Views', xAxieIsLog=False, yAxieIsLog=True);

myplot.plot(x, avrgY, label='count', xlabel='Stars', ylabel='Average of views', xAxieIsLog=False, yAxieIsLog=True);
Code Example #17
def main():
    # will replace file paths with Log object
    http_bytes_to_log = {
        1000:'1kb-http.csv',
        10000:'10kb-http.csv',
        100000:'100kb-http.csv',
        1000000:'1mb-http.csv',
        10000000:'10mb-http.csv',
    }
    http_cache_bytes_to_log = {
        1000:'1kb-http-cache.csv',
        10000:'10kb-http-cache.csv',
        100000:'100kb-http-cache.csv',
        1000000:'1mb-http-cache.csv',
        10000000:'10mb-http-cache.csv',
    }
    https_bytes_to_log = {
        1000:'1kb-https.csv',
        10000:'10kb-https.csv',
        100000:'100kb-https.csv',
        1000000:'1mb-https.csv',
        10000000:'10mb-https.csv',
    }


    # load files and make log objects
    for size in http_bytes_to_log:
        path = os.path.join(args.logdir, http_bytes_to_log[size])
        log = PowerMonitorLog(path)
        http_bytes_to_log[size] = log
    
    for size in http_bytes_to_log:
        path = os.path.join(args.logdir, http_cache_bytes_to_log[size])

        # do we have stats from cache load?
        have_cache = os.path.exists(path)
        if not have_cache: break

        log = PowerMonitorLog(path)
        http_cache_bytes_to_log[size] = log
    
    for size in https_bytes_to_log:
        path = os.path.join(args.logdir, https_bytes_to_log[size])
        log = PowerMonitorLog(path)
        https_bytes_to_log[size] = log

    # plot stuff
    sizes = sorted(http_bytes_to_log.keys())
    xsizes = numpy.array(sizes)/1000.0

    # energy consumed
    http_extra_energy = []
    http_cache_extra_energy = []
    https_extra_energy = []
    http_duration = []
    http_cache_duration = []
    https_duration = []
    for size in sizes:
        http_log = http_bytes_to_log[size]
        http_extra_energy.append(http_log.above_baseline_energy_uAh / 1000.0 / 100.0)  # uAh -> mAh -> per object
        http_duration.append(http_log.duration_seconds / 100.0)  # per object time, not total
        
        if have_cache:
            http_cache_log = http_cache_bytes_to_log[size]
            http_cache_extra_energy.append(http_cache_log.above_baseline_energy_uAh / 1000.0 / 100.0)  # uAh -> mAh -> per object
            http_cache_duration.append(http_cache_log.duration_seconds / 100.0)  # per-object time, not total

        https_log = https_bytes_to_log[size]
        https_extra_energy.append(https_log.above_baseline_energy_uAh / 1000.0 / 100.0)  # uAh -> mAh -> per object
        https_duration.append(https_log.duration_seconds / 100.0)  # per-object time, not total

    if have_cache:
        myplot.plot([xsizes, xsizes, xsizes, xsizes, xsizes, xsizes],
            [https_extra_energy, https_duration, http_extra_energy, http_duration, http_cache_extra_energy, http_cache_duration],
            labels=['HTTPS Energy', 'HTTPS Time', 'HTTP Energy', 'HTTP Time', 'HTTP Cache Energy', 'HTTP Cache Time'],
            colors=['red', 'red', 'black', 'black', 'green', 'green'], linestyles=['-', '--', '-', '--', '-', '--'],
            axis_assignments=[0, 1, 0, 1, 0, 1],
            xlabel='File Size [kB]', ylabel='Energy per Object [mAh]',
            num_series_on_addl_y_axis=2, additional_ylabels=['Time per Object [s]'],
            xscale='log', height_scale=0.85, legend_text_size=16,
            legend='upper left', labelspacing=0.1, handletextpad=0.4,
            ylim=(0, 1.8), additional_ylims=[(0, 35)],
            filename=os.path.join(args.logdir, 'energy_consumption.pdf'))
    else:
        myplot.plot([xsizes, xsizes, xsizes, xsizes],
            [https_extra_energy, https_duration, http_extra_energy, http_duration],
            labels=['HTTPS Energy', 'HTTPS Time', 'HTTP Energy', 'HTTP Time'],
            colors=['red', 'red', 'black', 'black'], linestyles=['-', '--', '-', '--'],
            axis_assignments=[0, 1, 0, 1],
            xlabel='File Size [kB]', #ylabel='Energy per Object [mAh]',
            title='Wi-Fi',
            show_y_tick_labels = False,
            num_series_on_addl_y_axis=2, additional_ylabels=['Time per Object [s]'],
            legend='upper left', labelspacing=0.1, handletextpad=0.4,
            xscale='log', height_scale=0.7, width_scale=0.6, ylim=(0, 1.8), additional_ylims=[(0, 35)],
            filename=os.path.join(args.logdir, 'energy_consumption_small.pdf'))
        
        myplot.plot([xsizes, xsizes, xsizes, xsizes],
            [https_extra_energy, https_duration, http_extra_energy, http_duration],
            labels=['HTTPS Energy', 'HTTPS Time', 'HTTP Energy', 'HTTP Time'],
            colors=['red', 'red', 'black', 'black'], linestyles=['-', '--', '-', '--'],
            axis_assignments=[0, 1, 0, 1],
            xlabel='File Size [kB]', ylabel='Energy per Object [mAh]',
            #title='Wi-Fi',
            num_series_on_addl_y_axis=2, additional_ylabels=['Time per Object [s]'],
            #legend='upper left', 
            labelspacing=0.1, handletextpad=0.4,
            xscale='log', yscale='log', additional_yscales=['log'],
            height_scale=0.7, 
            #ylim=(0, 1.8), additional_ylims=[(0, 35)],
            filename=os.path.join(args.logdir, 'energy_consumption.pdf'))



    # average current
    http_mean_current = []
    https_mean_current = []
    http_stddev = []
    https_stddev = []
    http_mean_current_per_byte = []
    https_mean_current_per_byte = []
    http_stddev_per_byte = []
    https_stddev_per_byte = []
    for size in sizes:
        http_log = http_bytes_to_log[size]
        http_mean_current.append(http_log.mean_current - http_log.baseline)
        http_stddev.append(http_log.stddev_current)
        http_mean_current_per_byte.append((http_log.mean_current - http_log.baseline) / float(size))
        http_stddev_per_byte.append(http_log.stddev_current / float(size))

        https_log = https_bytes_to_log[size]
        https_mean_current.append(https_log.mean_current - https_log.baseline)
        https_stddev.append(https_log.stddev_current)
        https_mean_current_per_byte.append((https_log.mean_current - https_log.baseline) / float(size))
        https_stddev_per_byte.append(https_log.stddev_current / float(size))
    
    # average current
    myplot.plot([xsizes, xsizes], [https_mean_current, http_mean_current],
        labels=['HTTPS', 'HTTP'], yerrs=[https_stddev, http_stddev],
        linestyles=['-', '-'], colors=['red', 'black'],
        xlabel='File Size (kB)', ylabel='Mean Current (mA)',
        xscale='log', height_scale=0.7,
        filename=os.path.join(args.logdir, 'mean_current.pdf'))
    
    # average current per byte
    myplot.plot([xsizes, xsizes], [https_mean_current_per_byte, http_mean_current_per_byte],
        labels=['HTTPS', 'HTTP'], yerrs=[https_stddev_per_byte, http_stddev_per_byte],
        linestyles=['-', '-'], colors=['red', 'black'],
        xlabel='File Size (kB)', ylabel='Mean Current per Byte (mA/B)',
        xscale='log', height_scale=0.7,
        filename=os.path.join(args.logdir, 'mean_current_per_byte.pdf'))
Code Example #18
    "threads", "sparsity", "compression time taken", "solution time taken"
]
Case_sensitivity = [0, 0, 0, 0]
Method = [1, 0, 0, 0]
Splitting = [0, 2, 3, 3]

t_x86 = mpr.parse(x86, Keywords, Case_sensitivity, Method, Splitting)
t_x86TH = mpr.parse(x86TH, Keywords, Case_sensitivity, Method, Splitting)
t_emu = mpr.parse(emu, Keywords, Case_sensitivity, Method, Splitting)
t_emuTH = mpr.parse(emuTH, Keywords, Case_sensitivity, Method, Splitting)

if (type(t_emu) == int):
    print(error[t_emu - 1])

#plt = mp.plot(np.array((x86[:,0]),dtype=int),                           #X data
#        np.array([emuTH[:,2],x86TH[:,2]*np.ones((x86.shape[0],))]),                              #Y data
#        [0,0],                                                          #Plot Type
#        ["blue","orange"],                                              #Colors
#        ["Threads","Clock Cycles","EMU vs x86 1024","EMU","x86"],       #Names
#        [1,1])                                                          #Labels

plt = mpl.plot(
    ["emu1", "emu2", "emu3", "emu4", "emu5", "emu6", "emu7", "emu8"],  #X data
    np.array([t_x86[0, :, 3], t_emu[0, :, 3]]),  #Y data
    [0, 0],  #Plot Type
    ["blue", "orange"],  #Colors
    ["Threads", "Clock Cycles", "EMU vs x86 1024", "EMU", "x86"],  #Names
    [1, 1])  #Labels

plt.savefig('1024emuVSx86.png', transparent=True)
plt.savefig('1024emuVSx86.svg', transparent=True)
Code Example #19
File: plots.py Project: dtnaylor/session-management
    xlabel='Mean # Conflicting Policies per Context across 1000 Apps',
    filename='mean_conflicts_per_app_across_contexts.pdf')

myplot.cdf([num_contexts_per_app_with_any_conflict], height_scale=0.6,
    xlabel='% of Contexts Producing a Conflict Across 1000 Apps',
    filename='num_contexts_per_app_with_any_conflict.pdf')


##
## IRC plot
##

x_plain = [10, 20, 30, 50, 100]
y_plain = [437, 1872, 2857, 3392, 3389]
x_dummy = [10, 20, 30, 40, 50, 100]
y_dummy = [536, 1862, 2520, 2925, 3073, 3290]
x_enc = [10, 20, 30, 40, 50, 100]
y_enc = [51, 124, 128, 229, 303, 411]

myplot.plot([x_plain, x_dummy, x_enc], [y_plain, y_dummy, y_enc],
    labels=['No Shim', 'Shim w/o Encryption', 'Shim w/ Encryption'],
    xlabel='Number of Connected Clients', ylabel='Messages per Second',
    height_scale=0.6, legend='right',
    filename='irc-tput-all.pdf')

myplot.plot([x_plain, x_dummy, x_enc], [y_plain, y_dummy, y_enc],
    labels=['No Shim', 'Shim w/o Encryption', 'Shim w/ Encryption'],
    xlabel='Number of Connected Clients', ylabel='Messages per Second',
    height_scale=0.6, yscale='log',
    filename='irc-tput-all-log.pdf')
Code Example #20
def plot_results(filename_to_results, filenames=None):
    # use the filenames list to make sure we process files in order
    # (so we can control the order of the series on the plot)
    if not filenames: filenames = filename_to_results.keys()
    
    filename_to_data = defaultdict(lambda: defaultdict(list))
    fraction_data = []
    fraction_labels = []
    absolute_data = []
    absolute_labels = []
    mean_percents_by_size = []
    mean_absolutes_by_size = []
    mean_by_size_xs = []
    mean_by_size_ys = []
    mean_by_size_yerrs = []
    mean_by_size_labels = []
    
    for filename in filenames:
        results = filename_to_results[filename]
        for r in results:
            if r.status == SUCCESS:
                filename_to_data[filename]['both_success'].append(r.url)

                filename_to_data[filename]['mean_percent_inflations'].append(r.https_mean / r.http_mean)
                filename_to_data[filename]['mean_absolute_inflations'].append(r.https_mean - r.http_mean)
                filename_to_data[filename]['median_percent_inflations'].append(r.https_median / r.http_median)
                filename_to_data[filename]['median_absolute_inflations'].append(r.https_median - r.http_median)
                if r.size:
                    filename_to_data[filename]['mean_percent_by_size'].append( (r.size/1000.0, r.https_mean / r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_absolute_by_size'].append( (r.size/1000.0, r.https_mean - r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_http_by_size'].append( (r.size/1000.0, r.http_mean, r.http_stddev) )
                    filename_to_data[filename]['mean_https_by_size'].append( (r.size/1000.0, r.https_mean, r.http_stddev) )
            elif r.status == FAILURE_NO_HTTP:
                filename_to_data[filename]['no_http'].append(r.url)
            elif r.status == FAILURE_NO_HTTPS:
                filename_to_data[filename]['no_https'].append(r.url)
            else:
                filename_to_data[filename]['other_error'].append(r.url)

        print '%i sites were accessible over both protocols' %\
            len(filename_to_data[filename]['both_success'])
        print '%i sites were not accessible over HTTP' %\
            len(filename_to_data[filename]['no_http'])
        print '%i sites were not accessible over HTTPS' %\
            len(filename_to_data[filename]['no_https'])
        print '%i sites were not accessible for other reasons' %\
            len(filename_to_data[filename]['other_error'])

        if 'pit' in filename:
            location = 'PIT'
        elif '3g' in filename:
            location = '3G'
        else:
            location = 'Fiber'

        fraction_data.append(filename_to_data[filename]['mean_percent_inflations'])
        fraction_labels.append('Mean (%s)' % location)
        fraction_data.append(filename_to_data[filename]['median_percent_inflations'])
        fraction_labels.append('Median (%s)' % location)

        absolute_data.append(numpy.array(filename_to_data[filename]['mean_absolute_inflations']))# * 1000)  # s -> ms
        absolute_labels.append('Mean (%s)' % location)
        absolute_data.append(numpy.array(filename_to_data[filename]['median_absolute_inflations']))# * 1000)  # s -> ms
        absolute_labels.append('Median (%s)' % location)

        try:
            mean_by_size_xs.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[0])
            mean_by_size_ys.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[1])
            mean_by_size_yerrs.append(zip(*sorted(filename_to_data[filename]['mean_http_by_size']))[2])
            mean_by_size_labels.append('Mean HTTP (%s)' % location)
            mean_by_size_xs.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[0])
            mean_by_size_ys.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[1])
            mean_by_size_yerrs.append(zip(*sorted(filename_to_data[filename]['mean_https_by_size']))[2])
            mean_by_size_labels.append('Mean HTTPS (%s)' % location)
        except Exception as e:
            logging.warn('Error processing size data: %s' % e)

        if location == 'BCN':
            mean_percents_by_size.append(filename_to_data[filename]['mean_percent_by_size'])
            mean_absolutes_by_size.append(filename_to_data[filename]['mean_absolute_by_size'])
    

    myplot.cdf(fraction_data,
        xlabel='Load Time Ratio (HTTPS/HTTP)', labels=fraction_labels,
        filename=os.path.join(args.outdir, '%s_fraction_inflation.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xlim=(1, 3), legend='lower right')

    myplot.cdf(absolute_data,
        xlabel='Load Time Difference (HTTPS-HTTP) [s]', labels=absolute_labels,
        filename=os.path.join(args.outdir, '%s_absolute_inflation.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xlim=(0,3), legend='lower right')
    
    myplot.cdf(absolute_data,
        xlabel='Load Time Difference (HTTPS-HTTP) [s]', labels=absolute_labels,
        filename=os.path.join(args.outdir, '%s_absolute_inflation_log.pdf' % args.tag),
        height_scale=0.7, numbins=10000, xscale='log', xlim=(0, 10), legend='lower right')

    # Plot fraction and absolute in same figure as subplots
    fig, ax_array = myplot.subplots(1, 2, height_scale=0.75, width_scale=1.2)
    myplot.cdf(fraction_data, fig=fig, ax=ax_array[0],
        xlabel='Load Time Ratio\n(HTTPS/HTTP)', labels=fraction_labels,
        numbins=10000, xlim=(1, 3), show_legend=False)

    lines, labels = myplot.cdf(absolute_data, fig=fig, ax=ax_array[1],
        xlabel='Load Time Difference\n(HTTPS-HTTP) [s]', labels=absolute_labels,
        numbins=10000, xlim=(0,3), legend='lower right', labelspacing=0.1, handletextpad=0.4)

    # shrink plots to make room for legend underneath
    #for ax in ax_array:
    #    box = ax.get_position()
    #    ax.set_position([box.x0, box.y0 + box.height * 0.25,
    #             box.width, box.height * 0.75])
    
    # shrink plots to make room for title above
    for ax in ax_array:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0,
                 box.width, box.height * 0.95])

    #myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation_no_legend.pdf' % args.tag))
    #fig.legend(lines, labels, loc='lower center', ncol=2, prop={'size':20}, frameon=False,
    #    bbox_to_anchor=(.5, -.03))
    fig.suptitle('O-Proxy Top 2000 Objects')
    myplot.save_plot(os.path.join(args.outdir, '%s_combined_inflation.pdf' % args.tag))


    try:
        myplot.plot([zip(*mean_percents_by_size[0])[0]], [zip(*mean_percents_by_size[0])[1]],
            xlabel='Object Size (KB)', ylabel='Fraction Inflation (HTTPS/HTTP)',
            linestyles=[''], xscale='log', 
            filename=os.path.join(args.outdir, '%s_fraction_by_size.pdf' % args.tag))
    
        myplot.plot([zip(*mean_absolutes_by_size[0])[0]], [zip(*mean_absolutes_by_size[0])[1]],
            xlabel='Object Size (KB)', ylabel='Absolute Inflation (HTTPS-HTTP) [sec]',
            linestyles=[''], xscale='log',
            filename=os.path.join(args.outdir, '%s_absolute_by_size.pdf' % args.tag))
    
        myplot.plot(mean_by_size_xs, mean_by_size_ys, yerrs=mean_by_size_yerrs,
            xlabel='Object Size (KB)', ylabel='Load Time [sec]', xscale='log',
            marker=None, labels=mean_by_size_labels,# legend='lower left',
            legend_cols=2, width_scale=2,
            filename=os.path.join(args.outdir, '%s_mean_lt_by_size.pdf' % args.tag))
    except Exception as e:
        logging.warn('Error processing size data: %s', e)
Code Example #21
    if (not (countOfLength[i] == 0)):
        avrgY[i] /= countOfLength[i]

# use cumulative average values
avrg_sumY = list(avrgY)
myplot.normalize(avrg_sumY)
for i in range(1, np.power(10, powerRank)):
    avrg_sumY[i] = avrg_sumY[i] + avrg_sumY[i - 1]

x = range(0, np.power(10, powerRank))

#fig3_1
# myplot.bar(x, y, label='', xlabel='Length', ylabel='Views', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank, deleteZero=True);

#fig3_2
# myplot.plot(x, sumy, label='', xlabel='Length', ylabel='Aggregation of views rate', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank, deleteZero=False);

#fig3_3
# myplot.plot(x, avrgY, label='count', xlabel='Length', ylabel='Average views', xAxieIsLog=True, yAxieIsLog=False, powerRank=powerRank, deleteZero=True);

#fig3_4
myplot.plot(x,
            avrg_sumY,
            label='count',
            xlabel='Length',
            ylabel='Aggregation of average views rate',
            xAxieIsLog=True,
            yAxieIsLog=False,
            powerRank=powerRank,
            deleteZero=True)
Code Example #22
File: plot.py Project: mami-project/mcsVNF
def plot_series(machine, remote, result_files):

    out_filename, out_filepath = outfile(args.opt, remote, machine)

    xs = []  # holds arrays of x values, 1 per series
    ys = []  # holds arrays of y values, 1 per series
    yerrs = []  # holds arrays of std devs, 1 per series
    labels = []
    plot_title = ''

    # maps X value (e.g., num slices or num mboxes) to a list of measured or
    # calculated RTTs for that value (sometimes RTT depends on X, e.g., #mbox)
    x_to_rtts = defaultdict(list)

    for protocol in PROTOCOLS[args.opt]:
        if protocol not in result_files: continue
        filepath = result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] not in (5, 7):  # should be either 5 or 7 cols
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])

        xs.append(data[:, 0])
        ys.append(transform(data[:, 3]))
        yerrs.append(transform(data[:, 4]))
        labels.append(LEGEND_STRINGS[protocol])
        plot_title = title(args.opt, remote, data)

        # RTT measurements
        rtts, _, _ = get_params(args.opt, remote, data)
        for i in range(data.shape[0]):
            x_to_rtts[data[i, 0]].append(rtts[i])

    rtt_lines = []
    if SHOW_RTTS[args.opt]:
        # average RTT measurements
        for x in x_to_rtts:
            x_to_rtts[x] = numpy.mean(x_to_rtts[x])
        # find RTT line endpoints
        e1 = (xs[0][0], x_to_rtts[xs[0][0]])
        e2 = (xs[0][-1], x_to_rtts[xs[0][-1]])
        rtt_lines = make_rtt_lines((e1, e2), SHOW_RTTS[args.opt])

    # TODO: avg RTT measurment
    rtt = 75

    if len(xs) != len(ys) or len(ys) != len(yerrs) or\
       len(yerrs) != len(labels) or len(ys) == 0:
        print 'ERROR: no well-formed data to plot ***'
        return

    print '[OUT]', out_filepath
    myplot.plot(xs, ys, yerrs=yerrs, labels=labels, xlabel=X_AXIS[args.opt],\
        ylabel=Y_AXIS[args.opt], guide_lines=rtt_lines[1:],\
        #title=plot_title,\
        #builds = [[], [3], [3, 1, 2], [3, 1, 2, 0], [3, 1, 2, 0, 4]],\
        #builds = [[], [1, 2], [0, 1, 2], [0, 1, 2, 3, 4]],\
        filename=out_filepath, **MANUAL_ARGS[out_filename])

    dump_to_file_for_xkcd(xs, ys, yerrs, labels, out_filepath)
Code Example #23
        2013: "http://sdibc.nju.edu.cn/zg/2013s07.htm",
    }
    M0, M1, M2 = ReadResults(url[year])
    return M0, M1, M2


def split(M_1, M):
    x = []
    y = []
    c = len(M)
    for i in range(c):
        x.append(i + 1)
        y.append((M[i] - M_1[i]) / M[i] * 100)
    return x, y


if __name__ == "__main__":
    M0, M1, M2 = ReadYear(2016)
    M0_1, M1_1, M2_1 = ReadYear(2015)

    m0x, m0y = split(M0_1, M0)
    m1x, m1y = split(M1_1, M1)
    m2x, m2y = split(M2_1, M2)
    myplot.plot(m0x, m0y, label="M0")
    myplot.plot(m1x, m1y, label="M1")
    myplot.plot(m2x, m2y, label="M2")
    myplot.Show(title=str(2016) + u' money supply YoY growth rates: M0, M1 and M2 compared',
                xlabel=u'Month',
                ylabel=u'Monthly YoY growth rate',
                legend=True)
Code Example #24
            arr_x50.append(x)
            c50 = 'pink'
            if arr_new[-1] > 3:
                print(" > 30")
            else:
                arr_y30.append(y)
                arr_x30.append(x)
                c30 = 'yellow'
                if arr_new[-1] > 1.5:
                    print(" > 20")
                else:
                    arr_y20.append(y)
                    arr_x20.append(x)
                    c20 = 'red'
                    if arr_new[-1] > 0.75:
                        print(" > 10")
                    else:
                        arr_y10.append(y)
                        arr_x10.append(x)
                        c10 = 'blue'
                        if arr_new[-1] > 0.325:
                            print(" > 5")
                        else:
                            arr_y5.append(y)
                            arr_x5.append(x)
                            c5 = 'black'
        iIm += 0.01
    iRe += 0.01

plot(arr_x50, arr_y50, c50, arr_x30, arr_y30, c30, arr_x20, arr_y20, c20,
     arr_x10, arr_y10, c10, arr_x5, arr_y5, c5)
Code Example #25
File: proj0.py Project: luyh7/graduation-project
viewTimes = []
index = 0
for line in dataset:
    views = line['views']

    # reduce noise
    # if lengthOfInt >= np.power(10, powerRank) - 1:
    #     continue;

    viewTimes.append(int(line['views']))
    if index % (len(dataset) / 10) == 0:
        print("running... " + str(index / (len(dataset) / 10)) + "0%")
    index += 1

viewTimes.sort()
viewTimes.reverse()
# use cumulative values
# for i in reversed(range(0,np.power(10, powerRank) - 1)):
#     viewTimes[i] = viewTimes[i] + viewTimes[i+1];

x = range(0, len(viewTimes))

y = viewTimes
myplot.plot(x,
            y,
            label='count',
            xlabel='Rank',
            ylabel='Views',
            xAxieIsLog=True,
            yAxieIsLog=True)
Code Example #26
File: main.py Project: youssefaly97/Graph-Plotter
#        [1,0,1,1,0])     #Labels
#
#plt.savefig('test.svg',transparent = True)

#plt = mp.plot(np.array((x86[:,0]),dtype=int),                             #X data
#        np.array([emu[:,2],emuTH[:,2],x86[1,2]*np.ones((x86.shape[0],)),x86TH[1,2]*np.ones((x86.shape[0],))]),                              #Y data
#        [0,0,1,1],                                                      #Plot Type
#        ["blue","red","purple","orange"],                                          #Colors
#        ["Threads","Clock Cycles","EMU vs AMD 1024","EMU","EMU TH","x86","x86 TH"],
#        [1,1,1,1])     #Labels

#plt = mp.plot(np.array((x86[:,0]),dtype=int),                             #X data
#        np.array([emu[:,2],emuTH[:,2],x86[:,2],x86TH[:,2]]),                              #Y data
#        [0,0,0,0],                                                      #Plot Type
#        ["blue","red","purple","orange"],                                          #Colors
#        ["Threads","Clock Cycles","EMU vs AMD 1024","EMU","EMU TH","x86","x86 TH"],
#        [0,0,0,0])     #Labels

#plt.savefig('1024allThreads.png',transparent = True)
#plt.savefig('1024allThreads.svg',transparent = True)

plt = mp.plot(
    np.array((x86[:, 0]), dtype=int),  #X data
    np.array([emuTH[:, 2], x86TH[1, 2] * np.ones((x86.shape[0], ))]),  #Y data
    [0, 1],  #Plot Type
    ["blue", "orange"],  #Colors
    ["Threads", "Clock Cycles", "EMU vs x86 1024", "EMU", "x86"],  #Names
    [1, 1])  #Labels

plt.savefig('1024emuVSx86.png', transparent=True)
plt.savefig('1024emuVSx86.svg', transparent=True)
Code Example #27
File: plot.py Project: JudsonWilson/mctls
def plot_series(machine, remote, result_files):

    out_filename, out_filepath = outfile(args.opt, remote, machine)

    xs = []  # holds arrays of x values, 1 per series
    ys = []  # holds arrays of y values, 1 per series
    yerrs = []  # holds arrays of std devs, 1 per series
    labels = []
    plot_title = ''

    # maps X value (e.g., num slices or num mboxes) to a list of measured or
    # calculated RTTs for that value (sometimes RTT depends on X, e.g., #mbox)
    x_to_rtts = defaultdict(list)

    for protocol in PROTOCOLS[args.opt]:
        if protocol not in result_files: continue
        filepath = result_files[protocol]
        print '[IN]', protocol, filepath
        data = numpy.loadtxt(filepath)
        if len(data) == 0 or\
           len(data.shape) != 2 or\
           data.shape[1] not in (5, 7):  # should be either 5 or 7 cols
            print 'WARNING: malformed data: %s' % filepath
            continue

        transform = numpy.vectorize(DATA_TRANSFORMS[args.opt])
        
        xs.append(data[:,0])
        ys.append(transform(data[:,3]))
        yerrs.append(transform(data[:,4]))
        labels.append(LEGEND_STRINGS[protocol])
        plot_title = title(args.opt, remote, data)

        # RTT measurements
        rtts, _, _ = get_params(args.opt, remote, data)
        for i in range(data.shape[0]):
            x_to_rtts[data[i,0]].append(rtts[i])

    rtt_lines = []
    if SHOW_RTTS[args.opt]:
        # average RTT measurements
        for x in x_to_rtts:
            x_to_rtts[x] = numpy.mean(x_to_rtts[x])
        # find RTT line endpoints
        e1 = (xs[0][0], x_to_rtts[xs[0][0]])
        e2 = (xs[0][-1], x_to_rtts[xs[0][-1]])
        rtt_lines = make_rtt_lines((e1, e2), SHOW_RTTS[args.opt])
            

    # TODO: avg RTT measurment
    rtt = 75

    if len(xs) != len(ys) or len(ys) != len(yerrs) or\
       len(yerrs) != len(labels) or len(ys) == 0:
        print 'ERROR: no well-formed data to plot ***'
        return

    print '[OUT]', out_filepath
    myplot.plot(xs, ys, yerrs=yerrs, labels=labels, xlabel=X_AXIS[args.opt],\
        ylabel=Y_AXIS[args.opt], guide_lines=rtt_lines[1:],\
        #title=plot_title,\
        #builds = [[], [3], [3, 1, 2], [3, 1, 2, 0], [3, 1, 2, 0, 4]],\
        #builds = [[], [1, 2], [0, 1, 2], [0, 1, 2, 3, 4]],\
        filename=out_filepath, **MANUAL_ARGS[out_filename])

    dump_to_file_for_xkcd(xs, ys, yerrs, labels, out_filepath)