Example #1
import numpy
import matplotlib.pyplot as plt
import hinton  # local module that draws Hinton diagrams

def distance(target, source, insertcost, deletecost, replacecost):
    n = len(target) + 1
    m = len(source) + 1
    # set up dist and initialize values
    dist = [[0 for j in range(m)] for i in range(n)]
    for i in range(1, n):
        dist[i][0] = dist[i - 1][0] + insertcost
    for j in range(1, m):
        dist[0][j] = dist[0][j - 1] + deletecost
    # align source and target strings
    for j in range(1, m):
        for i in range(1, n):
            inscost = insertcost + dist[i - 1][j]
            delcost = deletecost + dist[i][j - 1]
            if source[j - 1] == target[i - 1]:
                add = 0
            else:
                add = replacecost
            substcost = add + dist[i - 1][j - 1]
            dist[i][j] = min(inscost, delcost, substcost)

    # save a hinton plot of the distance matrix
    normdist = [[(dist[i][j] / dist[n - 1][m - 1]) - 0.5 for j in range(m)]
                for i in range(n)]
    hinton.hinton(numpy.array(normdist))
    plt.title("Distance matrix")
    plt.savefig('distance_hinton.png', format='png')

    # return min edit distance
    return dist[n - 1][m - 1]
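
A quick usage sketch (not part of the original listing): with insert and delete cost 1 and replace cost 2 this is the textbook Levenshtein setup, where "intention" vs. "execution" has distance 8. Note that the call also writes distance_hinton.png as a side effect.

if __name__ == '__main__':
    print(distance("execution", "intention", 1, 1, 2))  # expected output: 8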
Example #2
File: AIMA.py Project: felix1m/ki
def clear():
    # individual environments:
    global gesetzt
    global noch_da
    global pfad
    pfad = ""
    noch_da = 4
    gesetzt = False

    global rejection_sam_var
    rejection_sam_var = [graph, "", "", 20]

    global schwellwertffn
    global hintonnumberffn
    global alphaffn
    global ownexample
    ownexample = ""
    schwellwertffn.set(2.5)
    hintonnumberffn.set(20)
    alphaffn.set(0.2)

    # for all environments:
    anzeigefenster.delete("all")
    printfenster.delete(1.0, END)
    if not error:
        zeichne.draw(graph, "", "")

    umgebung = graph.__class__.__name__

    if umgebung == "FFNetwork":
        matrix = np.empty([15, 15])  # for the Hinton diagram
        # re-randomize the edge weights!
        for node in graph.get_nodes():
            for edge in node.get_edges():
                weight = random.randint(0, 100) / 100
                edge.set_weight(weight)
        # fill the matrix:
        for zeile in range(15):
            for spalte in range(15):
                matrix[zeile][spalte] = 0
        for node in graph.get_nodes():
            name = node.name()
            for edge in node.get_edges():
                dest = edge.end().name()
                weight = edge.weight
                # check where in the matrix this weight is written:
                if name[0] == "I":
                    oben = int(name[1])
                elif name[0] == "H":
                    oben = int(name[1]) + 9
                if dest[0] == "H":
                    runter = int(dest[1]) + 9
                else:
                    runter = 14
                matrix[oben][14 - runter] = weight
                matrix[runter][14 - oben] = weight
        hinton.hinton(matrix)
Example #3
                fig = plt.figure()
                plt.subplot(311)
                plt.imshow(mean_aligned)
                plt.subplot(312)
                plt.imshow(mean_unaligned)
                plt.subplot(313)
                plt.plot(np.sum(mean_aligned,axis=0),label = 'Aligned')
                plt.plot(np.sum(mean_unaligned,axis=0), label = 'Unaligned')
                plt.legend()
                fig.savefig(folder_name + '/' + 'var_%ist_mean.png' % (model_num_states))
                plt.close(fig)
                
                # Plot neuron emissions
                fig = plt.figure()
                hinton(p_emissions)
                fig.savefig(folder_name + '/' + 'var_emissions_%ist.png' % model_num_states)
                plt.close(fig)
                
                print('file%i, %s, model%i, taste%i' % (file, cond_dir, model_num_states, taste))
#   _____      _            _       _          _____                
#  / ____|    | |          | |     | |        / ____|               
# | |     __ _| | ___ _   _| | __ _| |_ ___  | |     ___  _ __ _ __ 
# | |    / _` | |/ __| | | | |/ _` | __/ _ \ | |    / _ \| '__| '__|
# | |___| (_| | | (__| |_| | | (_| | ||  __/ | |___| (_) | |  | |   
#  \_____\__,_|_|\___|\__,_|_|\__,_|\__\___|  \_____\___/|_|  |_|   
#
# Start -> tastes x neurons x trials x time
# For every time bin, correlate firing across all trials of a neuron with taste ranks -> neurons x time
# e.g. 15 data points for every palatability rank if 15 trials per taste
# Average ABSOLUTE correlations across neurons -> time
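
The banner above only sketches the palatability-correlation step in comments. A minimal numpy/scipy version of that reduction could look like the following; the array and function names (firing, palatability, palatability_corr) and the choice of Spearman rank correlation are illustrative assumptions, not taken from the project.

import numpy as np
from scipy.stats import spearmanr

def palatability_corr(firing, palatability):
    """firing: tastes x neurons x trials x time; palatability: one rank per taste."""
    n_tastes, n_neurons, n_trials, n_time = firing.shape
    # every trial of a taste gets that taste's rank, e.g. 15 points per rank for 15 trials
    ranks = np.repeat(palatability, n_trials)
    corr = np.zeros((n_neurons, n_time))
    for n in range(n_neurons):
        for t in range(n_time):
            # firing of neuron n at time t across all trials of all tastes
            corr[n, t] = spearmanr(firing[:, n, :, t].ravel(), ranks)[0]
    # average ABSOLUTE correlation across neurons -> one value per time bin
    return np.nanmean(np.abs(corr), axis=0)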
Example #4
File: main.py Project: grappli/pm1
                            parameters=cg.parameters,
                            step_rule=Scale(learning_rate=0.1))

train_set = H5PYDataset('mushrooms.hdf5', which_sets=('train',))
train_stream = DataStream.default_stream(
    train_set, iteration_scheme=SequentialScheme(
        train_set.num_examples, batch_size=128))

test_set = H5PYDataset('mushrooms.hdf5', which_sets=('test',))
test_stream = DataStream.default_stream(
    test_set, iteration_scheme=SequentialScheme(
        test_set.num_examples, batch_size=128))

main = MainLoop(
    model=Model(cost),
    data_stream=train_stream,
    algorithm=algorithm,
    extensions=[
        FinishAfter(after_n_epochs=10),
        Printing(),
        TrainingDataMonitoring([cost, error_rate], after_batch=True, prefix='train'),
        DataStreamMonitoring([cost, error_rate], after_batch=True, data_stream=test_stream, prefix='test'),
        Plot('Train',
             channels=[['train_cost', 'test_cost'], ['train_error_rate', 'test_error_rate']])
    ])

main.run()

hinton(W1.get_value())
hinton(W2.get_value())
Example #5
                ### MAP Outputs ###
                alpha, beta, scaling, expected_latent_state, expected_latent_state_pair = model_MAP.E_step(
                )
                # Save figures in appropriate directories
                for i in range(data.shape[0]):
                    fig = plt.figure()
                    raster(data=data[i, :],
                           expected_latent_state=expected_latent_state[:,
                                                                       i, :])
                    fig.savefig(folder_name + '/' + '%i_map_%ist.png' %
                                (i, model_num_states))
                    plt.close(fig)

                fig = plt.figure()
                hinton(model_MAP.p_transitions.T)
                plt.title('Log_lik = %f' % model_MAP.log_posterior[-1])
                plt.suptitle('Model converged = ' + str(model_MAP.converged))
                fig.savefig(folder_name + '/' +
                            'hinton_map_%ist.png' % model_num_states)
                plt.close(fig)

                # Save data in appropriate spots in HDF5 file
                node_name = '/map_hmm/%s/taste_%i/states_%i' % (
                    cond_dir, taste, model_num_states)

                try:
                    hf5.remove_node(node_name, recursive=True)
                except:
                    pass
Example #6
                threshold=1e-4,
                n_cpu=mp.cpu_count())
            ### MAP Outputs ###
            alpha, beta, scaling, expected_latent_state, expected_latent_state_pair = model_MAP.E_step(
            )

            for i in range(data.shape[0]):
                fig = plt.figure()
                raster(data[i, :],
                       expected_latent_state=expected_latent_state[:, i, :])
                plt.savefig(plot_dir + '/' + 'clust%i_%i_map_%ist.png' %
                            (cluster, i, model_num_states))
                plt.close(fig)

            plt.figure()
            hinton(model_MAP.p_transitions)
            plt.title('log_post = %f' % model_MAP.log_posterior[-1])
            plt.suptitle('Model converged = ' + str(model_MAP.converged))
            plt.savefig(plot_dir + '/' + 'transitions_map_clust%i_%ist.png' %
                        (cluster, model_num_states))
            plt.close()

            plt.figure()
            hinton(model_MAP.p_emissions)
            plt.title('log_post = %f' % model_MAP.log_posterior[-1])
            plt.suptitle('Model converged = ' + str(model_MAP.converged))
            plt.savefig(plot_dir + '/' + 'emissions_map_clust%i_%ist.png' %
                        (cluster, model_num_states))
            plt.close()

            plt.figure()
Example #7
            A[idx,x_theory.index(temp[1])] = -1/4
            A[x_theory.index(temp[1]),idx] = -1/3 * 1/Unq[temp[1]]  # for u_k, Partial fu
        B[idx,0] = 1/4 * (Na[temp[0]]/maxNa + vq[temp[0]]/maxvq + avgsumva[temp[0]]/maxavgsumva)
    else:
        ## Other PostTypeId
        A[idx,idx] = 1
    idx += 1

for user in ulist:
    A[idx,idx] = 1
    B[idx,0] = 0 # 1/3 * (repu[user.split(':')[0]]/maxrepu)
    #B[idx,0] = 1/3 * (repu[user.split(':')[0]]/maxrepu)
    idx += 1

if hinplot == 1:
    hinton(A, 'Amat')
    hinton(B, 'Bvec')
solution = np.linalg.solve(A,B)
out = dict(zip(x_theory,solution))
#pprint.pprint(out)

if hinplot == 1:
    hinton(solution, 'Sol')

## Analysing results
ans = {}
ques = {}
uuser = {}
userQ = {}
anslist = []
queslist = []
Example #8
                                             batch_size=128))

test_set = H5PYDataset('mushrooms.hdf5', which_sets=('test', ))
test_stream = DataStream.default_stream(test_set,
                                        iteration_scheme=SequentialScheme(
                                            test_set.num_examples,
                                            batch_size=128))

main = MainLoop(model=Model(cost),
                data_stream=train_stream,
                algorithm=algorithm,
                extensions=[
                    FinishAfter(after_n_epochs=10),
                    Printing(),
                    TrainingDataMonitoring([cost, error_rate],
                                           after_batch=True,
                                           prefix='train'),
                    DataStreamMonitoring([cost, error_rate],
                                         after_batch=True,
                                         data_stream=test_stream,
                                         prefix='test'),
                    Plot('Train',
                         channels=[['train_cost', 'test_cost'],
                                   ['train_error_rate', 'test_error_rate']])
                ])

main.run()

hinton(W1.get_value())
hinton(W2.get_value())
Example #9
def PCA_missing_data(plot=True):
	#Principal Component Analysis, with randomly missing data
	q = 2 #latent dimension
	d = 5 #observation dimension
	N = 200
	niters = 200
	Nmissing = 100
	true_W = np.random.randn(d,q)
	true_Z = np.random.randn(N,q)
	true_mean = np.random.randn(d,1)
	true_prec = 20.
	Xdata_full = np.dot(true_Z,true_W.T) + true_mean.T 
	Xdata_observed = Xdata_full + np.random.randn(N,d)*np.sqrt(1./true_prec)
	
	#erase some data
	missing_index_i = np.argsort(np.random.randn(N))[:Nmissing]
	missing_index_j = np.random.multinomial(1,np.ones(d)/d,Nmissing).nonzero()[1]
	Xdata = Xdata_observed.copy()
	Xdata[missing_index_i,missing_index_j] = np.nan
	
	
	#set up the problem...
	Ws = [nodes.Gaussian(d,np.zeros((d,1)),np.eye(d)*1e-3) for  i in range(q)]
	W = nodes.hstack(Ws)
	Mu = nodes.Gaussian(d,np.zeros((d,1)),np.eye(d)*1e-3)
	Beta = nodes.Gamma(d,1e-3,1e-3)
	Zs = [nodes.Gaussian(q,np.zeros((q,1)),np.eye(q)) for i in range(N)]
	Xs = [nodes.Gaussian(d,W*z+Mu,Beta) for z in Zs]
	[xnode.observe(xval.reshape(d,1)) for xnode,xval in zip(Xs,Xdata)]
	
	#make a network object
	net = Network()
	net.addnode(W)
	net.fetch_network()# automagically fetches all of the other nodes...
	
	#infer!
	net.learn(100)
		
	#plot
	if plot:
		import pylab
		import hinton
		#compare true and learned W 
		Qtrue,Rtrue = np.linalg.qr(true_W)
		Qlearn,Rlearn = np.linalg.qr(W.pass_down_Ex())
		pylab.figure();pylab.title('True W')
		hinton.hinton(Qtrue)
		pylab.figure();pylab.title('E[W]')
		hinton.hinton(Qlearn)
		
		if q==2:#plot the latent variables
			pylab.figure();pylab.title('true Z')
			pylab.scatter(true_Z[:,0],true_Z[:,1],50,true_Z[:,0])
			pylab.figure();pylab.title('learned Z')
			learned_Z = np.hstack([z.pass_down_Ex() for z in Zs]).T
			pylab.scatter(learned_Z[:,0],learned_Z[:,1],50,true_Z[:,0])
			
		#recovered X mean
		X_rec = np.hstack([x.pass_down_Ex() for x in Xs]).T
		
		#Recovered X Variance
		#slight hack here - set q variance of observed nodes to zeros (it should be random...)
		for x in Xs:
			if x.observed:
				x.qcov *=0
		var_rec = np.vstack([np.diag(x.qcov) for x in Xs]) + 1./np.diag(Beta.pass_down_Ex())
		
		#plot each recovered signal in a separate figure
		for i in range(d):
			pylab.figure();pylab.title('recovered_signal '+str(i))
			
			pylab.plot(Xdata_full[:,i],'g',marker='.',label='True') # 'true' values of missing data (without noise)
			pylab.plot(X_rec[:,i],'b',label='Recovered') # recovered missing data values
			pylab.plot(Xdata[:,i],'k',marker='o',linewidth=2,label='Observed') # with noise, and holes where we took out values
			pylab.legend()
			
			volume_x = np.hstack((np.arange(len(Xs)),np.arange(len(Xs))[::-1]))
			volume_y = np.hstack((X_rec[:,i]+2*np.sqrt(var_rec[:,i]), X_rec[:,i][::-1]-2*np.sqrt(var_rec[:,i])[::-1]))
			pylab.fill(volume_x,volume_y,'b',alpha=0.3)
			
		
		
		print('\nBeta')
		print(true_prec, Beta.pass_down_Ex()[0, 0])
		print('\nMu')
		print(np.hstack((true_mean, Mu.pass_down_Ex())))
		pylab.show()
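
The demo above is self-contained apart from the project's own modules (nodes, hinton, Network); assuming those are importable, a minimal driver is just:

if __name__ == '__main__':
	PCA_missing_data(plot=True)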
Example #10
		Xs.reverse()
		[a.update() for a in As]
		[c.update() for c in Cs]
		Q.update()
		R.update()
		print(niters - i)
		
	#plot
	if plot:
		import pylab
		import hinton
		#plot hintons of learned (and true) matrices.
		pylab.figure()
		pylab.subplot(1,2,1)
		pylab.title('True A')
		hinton.hinton(true_A)
		pylab.subplot(1,2,2)
		pylab.title('E[A]')
		hinton.hinton(A.pass_down_Ex())
		
		pylab.figure()
		pylab.subplot(1,2,1)
		pylab.title('True C')
		hinton.hinton(true_C)
		pylab.subplot(1,2,2)
		pylab.title('E[C]')
		hinton.hinton(C.pass_down_Ex())
		
		pylab.figure()
		pylab.subplot(2,2,1)
		pylab.title('True Q')
Example #11
File: AIMA.py Project: felix1m/ki
def restaurantffn_net_topology():
    global graph
    global error
    global ffnname

    number_nodes = [0, 0]
    graph = FFNetwork("", anzeigefenster, printfenster, ki)  # for clear_gui

    folder = os.path.abspath(os.path.dirname("AIMA.py"))
    folder = folder.split("\\aima")[0]
    path = folder + "\\feedforward"

    #path = os.path.abspath(os.path.dirname(__file__))
    # the .ffn file with the same name as the example is opened
    ffnname = ffnname.split("/")
    ffnname = ffnname[-1]
    ffnname = ffnname.split(".")
    ffnname = ffnname[0]
    name = str(path) + "/" + ffnname + ".ffn"
    print(name)
    try:
        datei = open(name).read()
        matrix = np.empty([15, 15])  # for the Hinton diagram
        szenario = []
        for string in datei.split("\n"):
            szenario.append(string)
        name = szenario[0]
        numberofnodes = int(szenario[1])
        numberofedges = int(szenario[2 + numberofnodes])
        graph = FFNetwork(name, anzeigefenster, printfenster, ki)
        # add nodes and edges to the graph:
        for element in range(2, 2 + numberofnodes, 1):
            szenario[element] = szenario[element].split()
            nameofnode = szenario[element][0]
            nodetype = szenario[element][1]
            graph.add_node(nameofnode, nodetype)
            if nameofnode[0] == "I":
                number_nodes[0] += 1
            if nameofnode[0] == "H":
                number_nodes[1] += 1
        for element in range(3 + numberofnodes,
                             3 + numberofnodes + numberofedges, 1):
            szenario[element] = szenario[element].split()
            start = szenario[element][0]
            end = szenario[element][1]
            start = graph.get_node(start)
            end = graph.get_node(end)
            weight = random.randint(0, 100) / 100
            graph.add_edge(start.name(), end.name(), weight)
        # fill the matrix:
        for zeile in range(15):
            for spalte in range(15):
                matrix[zeile][spalte] = 0
        for node in graph.get_nodes():
            name = node.name()
            for edge in node.get_edges():
                dest = edge.end().name()
                weight = edge.weight
                # check where in the matrix this weight is written:
                if name[0] == "I":
                    oben = int(name[1])
                elif name[0] == "H":
                    oben = int(name[1]) + 9
                if dest[0] == "H":
                    runter = int(dest[1]) + 9
                else:
                    runter = 14
                matrix[oben][14 - runter] = weight
                matrix[runter][14 - oben] = weight
        hinton.hinton(matrix)
        # show the initial matrix while the weights are still random:
        plt.show()
        # check that there are enough input/output nodes
        output = 0
        input = 0
        for node in graph.get_nodes():
            if node.get_type() == "output":
                output += 1
            if node.get_type() == "input":
                input += 1
        if (output == 0 or input == 0):
            printfenster.insert(END, "Not enough Input/Outputnodes")
        zeichne.draw(graph, "", "")
        return number_nodes
    except:
        error = True
        printfenster.insert(END, "Error\n")
Example #12
    #a missing data problem
    Nmissing = int(N * missing_pc / 100)
    observed2 = observed.copy()
    missingi = np.argsort(np.random.rand(N))[:Nmissing]
    missingj = np.random.randint(0, d - q,
                                 Nmissing)  #last q columns will be complete
    observed2[missingi, missingj] = np.NaN

    b = PCA_EM_missing(observed2, q)
    b.learn(niters)

    from hinton import hinton
    import pylab
    colours = np.arange(N)  # to colour the dots with
    hinton(linalg.qr(trueW.T)[1].T)
    pylab.title('true transformation')
    pylab.figure()
    hinton(linalg.qr(a.W.T)[1].T)
    pylab.title('reconstructed transformation')
    pylab.figure()
    hinton(linalg.qr(b.W.T)[1].T)
    pylab.title('reconstructed transformation (missing data)')
    pylab.figure()
    pylab.subplot(3, 1, 1)
    pylab.plot(latents)
    pylab.title('true latents')
    pylab.subplot(3, 1, 2)
    pylab.plot(a.m_Z)
    pylab.title('reconstructed latents')
    pylab.subplot(3, 1, 3)
Example #13
for t in range(50,150):
    g[t].weights[4:7] = np.ones(3)


f,Y = model.simulate(g)
f_est,P,M = model.estimate_fields(g,Y)
f_est = [f0] + f_est

#model.estimate_kernels(f,g,Y)

if True:
    vmin = -2
    vmax = 5
    plt.figure()
    hinton.hinton(model.LDS.A)
    plt.figure()
    plt.subplot(1,3,1)
    Z = []
    for fi in f:
        Z.append([fi(s) for s in np.linspace(2,8,100)])
    Z_true = np.array(Z)
    plt.imshow(Z_true,vmin=vmin,vmax=vmax)
    plt.xlabel('space')
    plt.ylabel('time')
    plt.title('true field')
    plt.subplot(1,3,2)
    Z = []
    for fi in f_est:
        Z.append([fi(s) for s in np.linspace(2,8,100)])
    Z_est = np.array(Z)
Example #14
              idx] = -1 / 3 * 1 / Unq[temp[1]]  # for u_k, Partial fu
        B[idx, 0] = 1 / 4 * (Na[temp[0]] / maxNa + vq[temp[0]] / maxvq +
                             avgsumva[temp[0]] / maxavgsumva)
    else:
        ## Other PostTypeId
        A[idx, idx] = 1
    idx += 1

for user in ulist:
    A[idx, idx] = 1
    B[idx, 0] = 0  # 1/3 * (repu[user.split(':')[0]]/maxrepu)
    #B[idx,0] = 1/3 * (repu[user.split(':')[0]]/maxrepu)
    idx += 1

if hinplot == 1:
    hinton(A, 'Amat')
    hinton(B, 'Bvec')
solution = np.linalg.solve(A, B)
out = dict(zip(x_theory, solution))
#pprint.pprint(out)

if hinplot == 1:
    hinton(solution, 'Sol')

## Analysing results
ans = {}
ques = {}
uuser = {}
userQ = {}
anslist = []
queslist = []
Example #15
	
	#a missing data problem
	Nmissing = int(N*missing_pc/100)
	observed2 = observed.copy()
	missingi = np.argsort(np.random.rand(N))[:Nmissing]
	missingj = np.random.randint(0,d-q,Nmissing)#last q columns will be complete
	observed2[missingi,missingj] = np.NaN
	
	b = PCA_EM_missing(observed2,q)
	b.learn(niters)
	
	
	from hinton import hinton
	import pylab
	colours = np.arange(N)# to colour the dots with
	hinton(linalg.qr(trueW.T)[1].T)
	pylab.title('true transformation')
	pylab.figure()
	hinton(linalg.qr(a.W.T)[1].T)
	pylab.title('reconstructed transformation')
	pylab.figure()
	hinton(linalg.qr(b.W.T)[1].T)
	pylab.title('reconstructed transformation (missing data)')
	pylab.figure()
	pylab.subplot(3,1,1)
	pylab.plot(latents)
	pylab.title('true latents')
	pylab.subplot(3,1,2)
	pylab.plot(a.m_Z)
	pylab.title('reconstructed latents')
	pylab.subplot(3,1,3)