Example #1
    def ask_color_axis_bgcolor(self, *args, **kwargs):
        logging.debug("clicked axis_bgcolor button")
        result = tkColorChooser.askcolor(parent=self.master, initialcolor=None, title="axis_bgcolor")
        ttk.Style().configure("axis_bgcolor.TButton", background=result[1])
        logging.debug("setting axis_bgcolor to chosen color %s" % (repr(result)))
        plt.colors()
        self.mAxes.set_axis_bgcolor(result[1])
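For context, a self-contained sketch of the same pattern using Python 3 module names (tkinter.colorchooser replaces the Python 2 tkColorChooser above); the widget layout is illustrative:

import tkinter as tk
from tkinter import colorchooser, ttk

root = tk.Tk()

def ask_bgcolor():
    # askcolor returns ((r, g, b), '#rrggbb'), or (None, None) if cancelled
    rgb, hexcolor = colorchooser.askcolor(parent=root, title="axis_bgcolor")
    if hexcolor is not None:
        ttk.Style().configure("axis_bgcolor.TButton", background=hexcolor)
        # on a Matplotlib Axes, ax.set_facecolor(hexcolor) is the modern
        # replacement for the deprecated set_axis_bgcolor used above

ttk.Button(root, text="axis_bgcolor", style="axis_bgcolor.TButton",
           command=ask_bgcolor).pack(padx=20, pady=20)
root.mainloop()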
Example #2
# TF1-style graph API (tf.placeholder / tf.Session); under TF2 run it via tf.compat.v1
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

def model(a, D):
    X = [ item[0] for item in a]
    Y = [ item[1] for item in a]

    test_value = np.arange(0, np.max(Y), 0.2)
    test_data = np.array([ [item**i for i in range(D)] for item in test_value ])
    test_data_holder = tf.placeholder(tf.float64, shape=(test_value.shape[0], D))

    lmbda_holder = tf.placeholder(tf.float64, shape=None)

    # Design matrix of powers [x^0, x^1, ..., x^(D-1)]; the target is the
    # linear column, i.e. this fits y = x with polynomial features
    x_data = np.array([ [i**j for j in range(D)] for i in X ], dtype='float64')
    y_data = x_data[:, 1]

    print("x shape = {} y shape = {}".format(x_data.shape, y_data.shape))


    W = tf.Variable(tf.random_uniform([D, 1], -0.1, 0.1, dtype=tf.float64))
    y = tf.matmul(x_data, W)
    y = tf.reshape(y, [-1])

    test_y = tf.matmul(test_data_holder, W)

#    lmbda = 1
    lmbda = 0

    # Gradient descent
    loss = tf.reduce_mean(tf.square(tf.subtract(y, y_data))) + lmbda_holder*tf.nn.l2_loss(W)
    optimizer = tf.train.AdamOptimizer(0.001)
    train = optimizer.minimize(loss)

    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        sess.run(init)

        for step in range(100000):
            sess.run(train, {lmbda_holder:lmbda})
            if step % 1000 == 0:
                print(step, sess.run([loss, W], {test_data_holder:test_data, lmbda_holder:lmbda}))

        loss, params, out = sess.run([loss, W, test_y], {test_data_holder:test_data, lmbda_holder:lmbda})
        print(params)
        print(f"loss = {loss}")

        plt.colors()
        plt.scatter(X, Y)
        plt.plot(X, X)
        plt.plot(test_value, np.reshape(out, [-1]))
        plt.show()
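As a sanity check on what the training loop converges to, a minimal closed-form sketch (not part of the original) of the same L2-regularized least-squares problem, w = (X^T X + lambda*I)^(-1) X^T y (tf.nn.l2_loss includes a factor 1/2, so the lambda conventions differ slightly):

import numpy as np

def ridge_fit(x, y, D, lmbda):
    X = np.vander(x, D, increasing=True)   # columns [x^0, x^1, ..., x^(D-1)]
    A = X.T @ X + lmbda * np.eye(D)        # regularized normal equations
    return np.linalg.solve(A, X.T @ y)     # w = (X^T X + lambda*I)^(-1) X^T y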
Example #3
def plot(n, X, Decoded_X):
    plt.figure(figsize=(30, 30))
    for i in range(n):
        print('original')
        plt.subplot(2, n, i + 1)
        plt.imshow(X[i].reshape(500, 500, 3))
        plt.colors()
        plt.axis('off')

        print('reconstruction')
        plt.subplot(2, n, i + 1 + n)
        plt.imshow(Decoded_X[i].reshape(500, 500, 3))
        plt.colors()
        plt.axis('off')

    plt.tight_layout()
    plt.show()
Example #4
def subploters():
    charlie = mpimg.imread('mario.png')
    #  colormaps plt.cm.datad
    cmaps = set(plt.cm.datad.keys())
    x = [(4, 3, 1, (1, 0, 0)), (4, 3, 2, (0.5, 0.5, 0)), (4, 3, 3, (0, 1, 0)),
         (4, 3, 4, (0, 0.5, 0.5)), (4, 3, (5, 8), (0, 0, 1)),
         (4, 3, 6, (1, 1, 0)), (4, 3, 7, (0.5, 1, 0)),
         (4, 3, 9, (0, 0.5, 0.5)), (4, 3, 10, (0, 0.5, 1)),
         (4, 3, 11, (0, 1, 1)), (4, 3, 12, (0.5, 1, 1))]
    fig = plt.figure(figsize=(6, 5))
    #fig.subplots_adjust(bottom=0, left=0, top = 0.975, right=1)
    for nrows, ncols, plot_number, factor in x:
        sub = fig.add_subplot(nrows, ncols, plot_number)
        sub.set_xticks([])
        plt.colors()
        sub.imshow(charlie * 0.0002, cmap=cmaps.pop())
        sub.set_yticks([])
    fig.show()
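A side note on the (5, 8) entry above: Matplotlib accepts a (first, last) tuple as the subplot index, producing a single axes that spans that range of grid cells. A minimal sketch:

import matplotlib.pyplot as plt

fig = plt.figure()
fig.add_subplot(4, 3, 1)       # one cell of a 4x3 grid
fig.add_subplot(4, 3, (5, 8))  # a single axes spanning cells 5 through 8
plt.show()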
Example #5
    def saveFinalPlots(self, errors_train, errors_test, sparsity_train, sparsity_test, errors_train_vector, errors_test_vector, epoch=0):
        #plot errors
        plt.figure(2, figsize=(10, 7))
        plt.clf()
        plt.plot(np.arange(len(errors_train)), errors_train, label='train error')
        plt.plot(np.arange(len(errors_train)), errors_test, label='test error')
        plt.colors()
        plt.legend()
        plt.title('Reconstruction error convergence')
        plt.xlabel('t')
        plt.ylabel('Reconstruction error')
        plt.savefig('plots/Reconstruction_errors_'+str(epoch)+'.pdf')

        #plot sparsity, real and non-zero
        plt.figure(3, figsize=(10, 7))
        plt.clf()
        plt.plot(np.arange(len(sparsity_train)), sparsity_train, label='train')
        plt.plot(np.arange(len(sparsity_test)), sparsity_test, label='test')
        plt.colors()
        plt.legend()
        plt.title('Objective function error convergence')
        plt.xlabel('t')
        plt.ylabel('E')
        plt.savefig('plots/Sparsity_'+str(epoch)+'.pdf')

        # plot reconstruction error output progression over time
        plt.figure(12, figsize=(10, 7))
        plt.clf()
        image=plt.imshow(np.clip(np.asarray(errors_train_vector).T, 0, 1), interpolation='nearest', aspect='auto', origin='lower')
        plt.xlabel('t')
        plt.ylabel('Output units \n (Rank Ordered)')
        plt.colors()
        plt.colorbar(image, label='reconstruction error')
        plt.title('Progressive reconstruction input error convergence')
        plt.savefig('plots/Reconstruction_errors_vector_' + str(epoch) + '.pdf')
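The method above leans on a common Matplotlib idiom: addressing figures by a fixed number and clearing them, so repeated calls during training keep updating the same windows. A minimal sketch of the idiom, with illustrative names:

import numpy as np
import matplotlib.pyplot as plt

def update_error_plot(errors_train, epoch):
    plt.figure(2, figsize=(10, 7))  # reuse figure 2 across calls
    plt.clf()                       # wipe the previous epoch's curves
    plt.plot(np.arange(len(errors_train)), errors_train, label='train error')
    plt.legend()
    plt.savefig('plots/Reconstruction_errors_' + str(epoch) + '.pdf')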
Example #6
# -*- coding: utf-8 -*-
__author__ = 'fred'

import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
from scipy.interpolate import interp1d


filename = "../8)kleinzugroß.txt"
# filename = "data/3)250mA.txt"
# filename = "data/3)500mA.txt"
# filename = "data/3)1000mA.1.txt"


plotcolors = plt.colors()


# def nextplotcolor(i=0):
# def cs():
#         while True:
#             while i >= len(plotcolors):
#                 i -= len(plotcolors)
#             yield plotcolors[i]
#             i += 1
#     return cs
#


def fitfunct(t, a, b, c, d, e):
    return c * np.sin(a * 2 * np.pi * t + d) * np.exp(-b * t) + e
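The snippet ends before fitfunct is used; a hedged sketch (synthetic data, illustrative initial guesses, reusing the imports already at the top of this script) of how it would typically be handed to scipy.optimize.curve_fit:

t = np.linspace(0, 2, 400)
u = fitfunct(t, 5.0, 1.5, 2.0, 0.3, 0.1) + 0.05 * np.random.randn(t.size)

# p0 holds initial guesses for (a, b, c, d, e)
popt, pcov = scipy.optimize.curve_fit(fitfunct, t, u, p0=[5, 1, 2, 0, 0])
plt.plot(t, u, '.', label='data')
plt.plot(t, fitfunct(t, *popt), '-', label='fit')
plt.legend()
plt.show()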
Example #7
    def saveNetworkPlots(self, network, epoch=0, calc_error_surface=False):
        # plot weights
        plt.figure(4, figsize=(10, 7))
        plt.clf()
        plt.imshow(self.reshapeWeights(network.weights), interpolation='nearest', vmin=0, vmax=1)
        plt.axis('off')
        plt.colors()
        plt.title('Learned filters')  # normalized
        plt.savefig('plots/Final_filters_' + str(epoch) + '.pdf')
        plt.pause(0.01)

        #plot progressive reconstruction
        plt.figure(5, figsize=(10, 7))
        plt.clf()
        reconstruction = np.ascontiguousarray(network.reconstruction[:, network.output_ranks])
        reconstruction[:, -1] = network.input  # overwrite the last one to show the real image
        plt.imshow(self.reshapeWeights(reconstruction, normalize=False), interpolation='nearest', vmin=0, vmax=1)
        plt.axis('off')
        plt.colors()
        plt.title('Progressive reconstruction')
        plt.savefig('plots/Progressive_reconstruction_' + str(epoch) + '.pdf')


        #plot unit outputs
        plt.figure(6, figsize=(10, 7))
        plt.clf()
        plt.imshow(self.reshapeWeights(network.weights, normalize=True, modifier=network.output.ravel()), interpolation='nearest', vmin=0, vmax=1)
        plt.axis('off')
        plt.colors()
        plt.title('Progressive reconstruction filters used')  # normalized and possibly sorted
        plt.savefig('plots/Progressive_reconstruction_usedfilters_' + str(epoch) + '.pdf')


        #plot current input
        plt.figure(7, figsize=(5, 5))
        plt.clf()
        plt.imshow(network.input.reshape(network.input_shape), interpolation='nearest', vmin=0, vmax=1)
        plt.axis('off')
        plt.colors()
        plt.title('Current input')
        plt.savefig('plots/Current_input_' + str(epoch) + '.pdf')

        #plot reconstruction error surface over n samples
        if calc_error_surface:
            step_size = 5
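            # The surface sweeps two axes: row i keeps only the top
            # (i+1)*step_size inputs (rank ordered), column j keeps only the
            # top (j+1)*step_size outputs, and each cell stores the resulting
            # reconstruction error.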
            error_surface = np.zeros((network.weights.shape[0] // step_size, network.weights.shape[1] // step_size))
            arg_sorted_input = np.argsort(network.input, axis=1, kind='mergesort').ravel()[::-1].astype(np.int32)
            orig_input = np.copy(network.input)
            for i in range(0, error_surface.shape[0]):
                #drop inputs
                sorted_input_dropped = orig_input[:, arg_sorted_input]
                sorted_input_dropped[:, (i+1)*step_size:] = 0
                #execute
                network.input[:, arg_sorted_input] = sorted_input_dropped
                network.execute()
                network.rank_output()
                for j in range(0, error_surface.shape[1]):
                    #drop outputs
                    sorted_output_dropped = np.copy(network.output_raw[:, network.output_ranks])
                    sorted_output_dropped[:, (j+1)*step_size:] = 0
                    #TAKE ABS VALUE
                    network.output[:, network.output_ranks] = np.abs(sorted_output_dropped)
                    #reconstruct
                    network.reconstruct()
                    network.input = np.copy(orig_input)
                    error_surface[i, j] += network.get_reconstruction_error()

            network.input = orig_input
            network.execute()
            network.rank_output()
            network.reconstruct()
            plt.figure(8, figsize=(10, 7))
            plt.clf()
            plt.colors()
            image=plt.matshow(np.minimum(1, error_surface), fignum=8)
            plt.colorbar(image, label='reconstruction error')
            plt.ylabel('active inputs \n (rank ordered)')
            plt.colors()
            plt.title('active outputs \n (rank ordered)')
            plt.savefig('plots/Real_error_surface_' + str(epoch) + '.pdf')

        # plot histogram outputs
        plt.figure(9, figsize=(10, 7))
        plt.clf()
        plt.hist(network.output[0], bins=network.output.shape[1])
        plt.title('Histogram output units')
        plt.xlabel("Value")
        plt.ylabel("Frequency")
        plt.savefig('plots/Histogram_output_' + str(epoch) + '.pdf')

        # plot histogram outputs negative
        plt.figure(10, figsize=(10, 7))
        plt.clf()
        plt.hist(network.output_raw[0], bins=network.output.shape[1])
        plt.title('Histogram output units')
        plt.xlabel("Value")
        plt.ylabel("Frequency")
        plt.savefig('plots/Histogram_output_negative_' + str(epoch) + '.pdf')

        #plot sorted weights
        plt.figure(11, figsize=(10, 7))
        plt.clf()
        weights_sorted = np.ascontiguousarray(network.weights[:, np.argsort(network.output_average)[::-1]])
        plt.imshow(self.reshapeWeights(weights_sorted), interpolation='nearest', vmin=0, vmax=1)
        plt.axis('off')
        plt.colors()
        plt.title('Learned filters sorted')
        plt.savefig('plots/Sorted_filters_' + str(epoch) + '.pdf')
Example #8
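The snippet assumes a graph G has already been built; a minimal setup sketch (the edges are illustrative):

import networkx as nx
import matplotlib.pyplot as plt

G = nx.Graph()
G.add_edges_from([(1, 2), (1, 3), (2, 3), (4, 5)])  # toy graph, two components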
# G.add_node("spam")
# G.add_nodes_from("abc")
# G.add_edge('e',1)
# G.add_edge('e','spam')

# G.remove_node(1)
print G.degree(1)  # degree of node 1
print nx.degree_histogram(G)  # degree distribution of the graph (frequency of each degree, from 0 up to the maximum)

print "num of nodes:", G.number_of_nodes()
print "num of edges:", G.number_of_edges()
print "nodes of graph:", G.nodes()
print "edges of graph:", G.edges()
print "edges of node '1' graph:", G.edges(1)
print "the neighbor of node 1 is:", G.neighbors(1)

a = nx.number_connected_components(G)  # number of connected components

nx.draw_networkx(G, node_size=10)
plt.show()

plt.colors()

# nx.draw(G)
# plt.savefig("path.png")

# Save the graph as a GML file
# nx.write_gml(G, 'dataset/aaaa.gml')

# Shortest path between two nodes of a networkx graph
# path = nx.shortest_path_length(G, start, end)  # shortest path length
Example #9
        print(f"iteration {i}: loss={loss_model:.4}")

print(f"W = {W}")
print(f"b = {b}")

TN = 100
test_education = np.full(TN, 22)
test_income = np.random.randint(TN, size=test_education.shape[0])
test_income = np.sort(test_income)

true_model_Y = true_y(test_education, test_income)
true_sample_Y = sample(test_education, test_income)
X = np.concatenate((test_education[:, np.newaxis], test_income[:, np.newaxis]), axis=1)

out, _, _ = compute_loss(X, W, b)
loss_model, _ = mean_square_loss(out, true_model_Y)
loss_sample, _ = mean_square_loss(out, true_sample_Y)

print(f"testing: loss (compare with Oracle)={loss_model:.6}")
print(f"testing: loss (compare with sample)={loss_sample:.2}")

plt.colors()
plt.scatter(test_income, true_model_Y, alpha=0.4)
plt.scatter(test_income, true_sample_Y, alpha=0.4)

plt.plot(test_income, out, color="r")
plt.legend(['prediction', 'true label', 'sample label'])

plt.show()

Example #10
def plot_generations(filename, cost=None, g=None, p=None,
                     output_constrained=['EigenValue', ['Weight', 'Lift']],
                     outputs_plotted=['Weight', 'Velocity'], source='raw',
                     units=['N', 'm/s'], n_generation=20,
                     last_best=True, color_scheme='individual',
                     optimizers=['NSGA2', 'SNOPT'], plot_type='all',
                     output_labels=None, label_size=None,
                     pareto=False, pareto_options={'maxX': False,
                                                   'maxY': False}):
    """

    :param filename: name of file to read
    :param cost: function that calculates the cost; if not defined and
           'best' is used, it is the minimum value in outputs_plotted
    :param g: constraint function
    :param p: processing function (e.g. convert units after constraints)
    :param output_constrained: Outputs that will be constrained by g
    :param outputs_plotted: The two outputs in the axis of the scatter
            plot. If only one is defined, the x axis is considered the
            generation
    :param source: 'raw' or 'processed'
    :param units: units to be on the scatter plots
    :param n_generation: number of generations
    :param last_best: if True, the last individual is the best and is plotted.
    :param color_scheme: if 'individual', each point in the scatter plot
        has a unique color. If 'generation', each individual in the
        same generation has the same color.
    :param optimizers: list of optimizers (works for plot_type = 'best')
    :param plot_type: 'all' plots all individuals and 'best' plots only the
        best individuals (still does not work for multiobjective).
        'all and best' plots both together. 'number of evaluations' plots
        objective functions versus number of objective function evaluations
    :param output_labels: if defined, defines the labels on the plot.
        Otherwise the outputs_plotted are used
    """
    # h = # Equality Constraint
    if g is None or output_constrained is None:
        process = False
    else:
        process = True
    if source == 'processed':
        pullData = open(filename).read()
        dataArray = pullData.split('\n')
        Drag = []
        Weight = []
        Generation = []
        for eachLine in dataArray:
            # Avoids extra lines
            if len(eachLine) > 1:
                x, y, z = eachLine.split('\t')
                Generation.append(x)
                Weight.append(y)
                Drag.append(z)
        n_generation = max(Generation)  # computed after Generation is populated

    elif source == 'raw':
        pullData = output_reader(filename)

        # Creating Generation counter
        pullData['Generation'] = []

        RandomKey = pullData.keys()[0]  # Assumes keys have items of same len
        if RandomKey == 'Generation':
            RandomKey = pullData.keys()[1]
        population_size = len(pullData[RandomKey]) / n_generation
        for i in range(population_size,
                       len(pullData[RandomKey])+population_size):
            pullData['Generation'].append(i / population_size)
        # print pullData['Generation'], population_size
        # If process is True, the values that violate the constraint will be
        # deleted in all of the dictionaries of pullData
        if process:
            for i in range(len(pullData[RandomKey])):
                for j in range(len(g)):
                    if type(output_constrained[j]) != list:
                        if pullData[output_constrained[j]][i] is not None:
                            if not g[j](pullData[output_constrained[j]][i]):
                                for key in pullData:
                                    #                    try:
                                    pullData[key][i] = None
                #                    except:
                #                        print key

                    elif len(output_constrained[j]) == 2:

                        # Need to verify whether the values were already nulled
                        if (pullData[output_constrained[j][0]][i] is not None
                                and pullData[output_constrained[j][1]][i] is not None):
                            if not g[j](pullData[output_constrained[j][0]][i],
                                        pullData[output_constrained[j][1]][i]):
                                for key in pullData:
                                    #                    try:
                                    pullData[key][i] = None
                #                    except:
                #                        print key
            while None in pullData[RandomKey]:
                for key in pullData:
                    pullData[key].remove(None)

    # Processing
    if p is not None:
        for k in range(len(outputs_plotted)):
            for i in range(len(pullData[outputs_plotted[k]])):
                pullData[outputs_plotted[k]][i] = p[k](pullData[outputs_plotted[k]][i])

    plt.colors()

    generation = pullData['Generation']
    x = pullData[outputs_plotted[0]]
    if plot_type == 'number of evaluations' and source == 'raw':
        x = np.array(pullData['Generation'])*population_size
        y = pullData[outputs_plotted[0]]
        plt.xlim(0.5, n_generation*population_size + 0.5)
    elif len(outputs_plotted) == 2:
        x = pullData[outputs_plotted[0]]
        y = pullData[outputs_plotted[1]]
        space = 0.02*(max(x) - min(x))
        plt.xlim(min(x) - space, max(x) + space)
    else:
        x = pullData['Generation']
        y = pullData[outputs_plotted[0]]
        plt.xlim(0.5, n_generation + 0.5)
    # print 'x ', x
    # print 'y ', y
    if plot_type == 'best' or plot_type == 'all and best' or (
            plot_type == 'number of evaluations' and source == 'raw'):
        global_min = 9999.
        cost_list = []
        generation_list = []
        if cost is not None:
            for j in range(1, n_generation+1):
                current_min = 9999.
                for i in range(len(generation)):
                    if generation[i] == j and y[i] < current_min:
                        current_min = y[i]
                if current_min < global_min:
                    global_min = current_min
                cost_list.append(global_min)
                generation_list.append(j)
        if plot_type == 'number of evaluations' and source == 'raw':
            plt.plot(np.array(generation_list)*population_size, cost_list,
                     '-o')
        else:
            plt.plot(generation_list, cost_list, '-o')
            plt.xlim(0.5, n_generation + 0.5)

    if (plot_type == 'all' or plot_type == 'all and best') or (
            plot_type == 'number of evaluations' and source == 'raw'):
        for i in range(len(x)):
            if color_scheme == 'generation':
                plt.scatter(x[i], y[i],
                            color=((1.-float(generation[i])/n_generation,
                                    float(generation[i])/n_generation,
                                    0, 1.)))
            elif color_scheme == 'individual':
                plt.scatter(x[i], y[i], color=((1.-float(i)/len(x),
                                                float(i)/len(x),
                                                0, 1.)))
            plt.plot()

        if last_best:
            plt.scatter(x[-1], y[-1], marker='s')
            plt.plot()

    if pareto is True:
        p_front = pareto_frontier(x, y, maxX=pareto_options['maxX'],
                                  maxY=pareto_options['maxY'])
        # Then plot the Pareto frontier on top
        plt.plot(p_front[0], p_front[1], lw=3)

    plt.grid()

    if label_size is None:
        if len(outputs_plotted) == 2:
            if output_labels is None:
                if units is not None:
                    plt.xlabel(outputs_plotted[0] + ' (' + units[0] + ')')
                    plt.ylabel(outputs_plotted[1] + ' (' + units[1] + ')')
                else:
                    plt.xlabel(outputs_plotted[0])
                    plt.ylabel(outputs_plotted[1])
            else:
                if units is not None:
                    plt.xlabel(output_labels[0] + ' (' + units[0] + ')')
                    plt.ylabel(output_labels[1] + ' (' + units[1] + ')')
                else:
                    plt.xlabel(output_labels[0])
                    plt.ylabel(output_labels[1])
        else:
            plt.xlabel('Iteration number')
            if output_labels is None:
                if units is not None:
                    plt.ylabel(outputs_plotted[0] + ' (' + units[0] + ')')
                else:
                    plt.ylabel(outputs_plotted[0])
            else:
                if units is not None:
                    plt.ylabel(output_labels[0] + ' (' + units[0] + ')')
                else:
                    plt.ylabel(output_labels[0])
    else:
        if len(outputs_plotted) == 2:
            if output_labels is None:
                if units is not None:
                    plt.xlabel(outputs_plotted[0] + ' (' + units[0] + ')',
                               fontsize=label_size[0])
                    plt.ylabel(outputs_plotted[1] + ' (' + units[1] + ')',
                               fontsize=label_size[1])
                else:
                    plt.xlabel(outputs_plotted[0],
                               fontsize=label_size[0])
                    plt.ylabel(outputs_plotted[1],
                               fontsize=label_size[1])
            else:
                if units is not None:
                    plt.xlabel(output_labels[0] + ' (' + units[0] + ')',
                               fontsize=label_size[0])
                    plt.ylabel(output_labels[1] + ' (' + units[1] + ')',
                               fontsize=label_size[1])
                else:
                    plt.xlabel(output_labels[0], fontsize=label_size[0])
                    plt.ylabel(output_labels[1], fontsize=label_size[1])
        else:
            if plot_type == 'number of evaluations':
                plt.xlabel('Number of objective function evaluations',
                           fontsize=label_size[0])
            else:
                plt.xlabel('Iteration number', fontsize=label_size[0])
            if output_labels is None:
                if units is not None:
                    plt.ylabel(outputs_plotted[0] + ' (' + units[0] + ')',
                               fontsize=label_size[1])
                else:
                    plt.ylabel(outputs_plotted[0],
                               fontsize=label_size[1])
            else:
                if units is not None:
                    plt.ylabel(output_labels[0] + ' (' + units[0] + ')',
                               fontsize=label_size[1])
                else:
                    plt.ylabel(output_labels[0],
                               fontsize=label_size[1])
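A hypothetical call, for orientation only; the filename and output names are illustrative, not from the original project:

plot_generations('results/generations.txt',
                 outputs_plotted=['Weight', 'Velocity'],
                 units=['N', 'm/s'], n_generation=20,
                 color_scheme='generation', plot_type='all')
plt.show()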
Example #11
In this case, constant values are provided for the error
in both the x- and y-directions.
"""
import numpy as np
import matplotlib.pyplot as plt

# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)

fig, ax = plt.subplots()
ax.errorbar(x, y, xerr=0.2, yerr=0.4)
ax.plot([0, 0], [-0.25, 1], 'r--')
plt.show()

print(plt.colors())

# +
from imp4nb import *
rnd = np.random.randint(1,200)
for i in range(4):
    rnd = np.random.randint(1,10)
    time.sleep(rnd)
    print('Slept ' + str(rnd) + ' seconds...')
cnx = lite.connect('data\\quandan.db')
df = pd.read_sql("select product.品牌名称 as 品牌, count(*) as 次数, sum(金额) as 销售额"
                 " from xiaoshoumingxi,product"
                 " where (product.商品全名 = xiaoshoumingxi.商品全名) group by 品牌 order by 销售额 desc",cnx) 
# NOTE: the query above selects only 品牌/次数/销售额, so these 日期 lookups
# would raise a KeyError as written
print(df['日期'].max())
print(df['日期'].min())
# print(df)
Example #12
    def nicv2(self, db, idList, top):

        start_time = time.time()
        idListLen = len(idList)
        errList = []
        meanList = []
        varList = []

        cmd = "SELECT message, cipher, data FROM trace WHERE id = '" + str(idList[0]) + "'"
        db.cur.execute(cmd)
        one = db.cur.fetchone()
        tmsg, tcrypt, traw_data = one

        parse_data = parse_binary(str(traw_data))
        points = len(parse_data)
        print "Points in trace:", points

        print "Calculating ..."

        #classList = [[0.0] * points]*256
        tclassList = [0.0] * points
        tMeanList = [0.0] * points
        tPowerMeanList = [0.0] * points
        parallels = 20
        #tGlobalVarlist = [[]]*parallels
        tGlobalVarlist = [[] for i in range(parallels)]
        classList = []
        #globalVarlist = [0.0] * points
        globalVarlist = []
        byteList = []

        #classList.append([1, 2, 3])
        #classList.append([1, 2, 3])
        #print "Test len:", len(classList[0]), len(classList[1])

        collected = 0
        classCalcCount = 0
        classCountNotIncluded = 0
        tracesInClass = 0

        #for i in range(points):
        for i in range(points//parallels):

            start_time = time.time()
            if collected == 0:
                for j in range(idListLen):
                    err = 0

                    cmd = "SELECT message, data FROM trace WHERE id = '" + str(idList[j]) + "'"
                    db.cur.execute(cmd)
                    one = db.cur.fetchone()
                    msg, raw_data = one

                    try:
                        parse_data = parse_binary(str(raw_data))
                    except:
                        err = 1
                        errList.append(idList[j])
                        #print 'Error. Trace ID: ', idList[j]

                    if err == 0:
                        for k in range(parallels):
                            tGlobalVarlist[k].append(parse_data[i*parallels+k])

                        parse_data = None
                        byteList.append((int(msg[0:2], 16), idList[j]))

                    if j%parallels == 0 and j != 0:
                        print j, "traces were processed."

                for m in range(parallels):
                    globalVarlist.append(numpy.var(numpy.array(tGlobalVarlist[m])))

                tGlobalVarlist = None
                tGlobalVarlist = [[] for m in range(parallels)]

                collected = 1

            else:

                for j in range(idListLen):
                    err = 0

                    cmd = "SELECT message, data FROM trace WHERE id = '" + str(idList[j]) + "'"
                    db.cur.execute(cmd)
                    #one = db.cur.fetchone()
                    msg, raw_data = db.cur.fetchone()
                    msg = None

                    try:
                        parse_data = parse_binary(str(raw_data))
                    except:
                        err = 1

                    if err == 0:
                        for k in range(parallels):
                            tGlobalVarlist[k].append(parse_data[i*parallels+k])

                        parse_data = None

                    if j%20000 == 0:
                        print j, "traces were processed."

                for m in range(parallels):
                    globalVarlist.append(numpy.var(numpy.array(tGlobalVarlist[m])))

                print "TEST:", len(globalVarlist)

                tGlobalVarlist = None
                tGlobalVarlist = [[] for m in range(parallels)]

        lenn = len(byteList)
        for i in range(256):
            tracesInClass = 0
            for j in range(lenn):
                byte, id = byteList[j]
                if byte == i:
                    cmd = "SELECT message, data FROM trace WHERE id = '" + str(id) + "'"
                    db.cur.execute(cmd)
                    #one = db.cur.fetchone()
                    msg, raw_data = db.cur.fetchone()
                    msg = None
                    parse_data = parse_binary(str(raw_data))
                    for d in range(points):
                        tMeanList[d] = tMeanList[d] + parse_data[d]
                        tPowerMeanList[d] = tPowerMeanList[d] + parse_data[d]**2
                    tracesInClass = tracesInClass + 1  # one per matching trace, not per sample point

                    parse_data = None

            if tracesInClass > 0:
                for m in range(points):
                    #tMeanList[m] = tMeanList[m]/tracesInClass
                    tPowerMeanList[m] = tPowerMeanList[m]/tracesInClass - (tMeanList[m]/tracesInClass)**2
                print "Var[E(Y|X)] for class", i, "was calculated"
                classList.append(tPowerMeanList)
            else:
                classCountNotIncluded = classCountNotIncluded + 1
                print "Class was empty!"

            tMeanList = [0.0] * points
            tPowerMeanList = [0.0] * points

        print "Classes:", 256 - classCountNotIncluded

        for i in range(256 - classCountNotIncluded):
            for j in range(points):
                if i != 0:
                    classList[0][j] = classList[0][j] + classList[i][j]

        print "Total Var[E(Y|X)] for all classes was calculated!"

        lenn = points//parallels*parallels
        for i in range(lenn):
            globalVarlist[i] = math.sqrt(classList[0][i]/globalVarlist[i]/lenn)

        print "NICV function was calculated!"
        print 'Execution time:', time.time() - start_time

        t = open("nicv", "w")
        for i in range(lenn):
            t.write(str(globalVarlist[i]) + ' ')
        t.close()

        plt.bar(range(0, lenn), globalVarlist, color='g')
        plt.ylabel('NICV')
        plt.xlabel('samples')
        plt.colors()
        plt.show()
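For reference, a compact NumPy sketch of the statistic the loop above estimates, in its textbook form NICV = Var[E(Y|X)] / Var(Y) per sample point (the implementation above additionally takes a square root and normalizes differently); it assumes all traces fit in memory:

import numpy as np

def nicv(traces, labels):
    # traces: (n_traces, n_points) array; labels: per-trace plaintext byte
    class_means = np.array([traces[labels == b].mean(axis=0)
                            for b in np.unique(labels)])
    # unweighted over classes; assumes the byte classes are roughly balanced
    return class_means.var(axis=0) / traces.var(axis=0)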