Example #1
def fitGLM(X, Y, H, l, hl, sp, norm, of, lateral, num_neurons_to_estimate):
    num_pres, num_neurons = numpy.shape(Y)
    num_pres, kernel_size = numpy.shape(X)

    if H is not None:
        (trash, hist_size) = numpy.shape(H)
    else:
        hist_size = 0

    Ks = numpy.zeros((num_neurons, kernel_size + 2 + hist_size + lateral * (num_neurons - 1)))

    laplace = laplaceBias(numpy.sqrt(kernel_size), numpy.sqrt(kernel_size))

    rpi = numpy.linalg.pinv(X.T * X + __main__.__dict__.get("RPILaplaceBias", 0.0001) * laplace) * X.T * Y
    for i in xrange(0, num_neurons_to_estimate):
        print i
        k0 = (
            rpi[:, i].getA1().tolist()
            + [0, 0]
            + numpy.zeros((1, hist_size)).flatten().tolist()
            + numpy.zeros((1, lateral * (num_neurons - 1))).flatten().tolist()
        )
        if lateral and H is not None:
            HH = numpy.hstack((H, Y[:, :i], Y[:, i + 1 :]))
        elif lateral:
            HH = numpy.hstack((Y[:, :i], Y[:, i + 1 :]))
        else:
            HH = H

        glm = GLM(numpy.mat(X), numpy.mat(Y[:, i]), l * laplace, HH, hl, sp, norm, of=of)

        K = fmin_ncg(glm.func(), numpy.array(k0), glm.der(), fhess=glm.hess(), avextol=0.0000001, maxiter=200)
        Ks[i, :] = K

    return [Ks, rpi, glm]
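The rpi line above is regularized linear regression in closed form: it solves (X.T*X + lambda*L)*w = X.T*Y, where L is the smoothness prior returned by laplaceBias, and the per-neuron solution seeds the optimizer's initial guess k0. A minimal self-contained sketch of the same computation, with an identity matrix standing in for the Laplacian prior (laplaceBias is repository code not shown here, and all data below is synthetic):

import numpy

numpy.random.seed(0)
X = numpy.mat(numpy.random.randn(200, 25))    # 200 stimuli, 25-pixel kernels
w_true = numpy.mat(numpy.random.randn(25, 1))
Y = X * w_true + 0.1 * numpy.mat(numpy.random.randn(200, 1))

lam = 0.0001                         # plays the role of RPILaplaceBias
prior = numpy.mat(numpy.eye(25))     # stand-in for the laplaceBias prior
w = numpy.linalg.pinv(X.T * X + lam * prior) * X.T * Y

print numpy.linalg.norm(w - w_true)  # small residual on this toy problem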
Example #2
def fitGLM(X,Y,H,l,hl,sp,norm,of,lateral,num_neurons_to_estimate):
    num_pres,num_neurons = numpy.shape(Y)
    num_pres,kernel_size = numpy.shape(X)

    if H is not None:
        (trash,hist_size) = numpy.shape(H)
    else:
        hist_size = 0

    Ks = numpy.zeros((num_neurons,kernel_size+2+hist_size+lateral*(num_neurons-1)))

    laplace = laplaceBias(numpy.sqrt(kernel_size),numpy.sqrt(kernel_size))

    rpi = numpy.linalg.pinv(X.T*X + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * X.T * Y
    for i in xrange(0,num_neurons_to_estimate):
        print i
        k0 = rpi[:,i].getA1().tolist()+[0,0] + numpy.zeros((1,hist_size)).flatten().tolist() + numpy.zeros((1,lateral*(num_neurons-1))).flatten().tolist()
        if lateral and H is not None:
            HH = numpy.hstack((H,Y[:,:i],Y[:,i+1:]))
        elif lateral:
            HH = numpy.hstack((Y[:,:i],Y[:,i+1:]))
        else:
            HH = H

        glm = GLM(numpy.mat(X),numpy.mat(Y[:,i]),l*laplace,HH,hl,sp,norm,of=of)

        K = fmin_ncg(glm.func(),numpy.array(k0),glm.der(),fhess = glm.hess(),avextol=0.0000001,maxiter=200)
        Ks[i,:] = K

    return [Ks,rpi,glm]
Example #3
def sequentialFilterFinding():
    d = contrib.dd.loadResults("newest_dataset.dat")
    (sizex, sizey, training_inputs, training_set, validation_inputs,
     validation_set, ff, db_node) = sortOutLoading(d)
    raw_validation_set = db_node.data["raw_validation_set"]
    contrib.modelfit.save_fig_directory = '/home/antolikjan/Doc/reports/Sparsness/SequentialFilterFitting/'

    params = {}
    params["SequentialFilterFitting"] = True
    db_node = db_node.get_child(params)

    params = {}
    params["alpha"] = __main__.__dict__.get('Alpha', 0.02)
    params["num_neurons"] = __main__.__dict__.get('NumNeurons', 10)
    params["OF"] = __main__.__dict__.get('OF', 'Square')
    db_node = db_node.get_child(params)

    num_neurons_to_run = params["num_neurons"]

    training_set = numpy.mat(training_set)[:, 0:num_neurons_to_run]
    validation_set = numpy.mat(validation_set)[:, 0:num_neurons_to_run]
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)
    raw_validation_set = numpy.array(raw_validation_set)[:, :,
                                                         0:num_neurons_to_run]

    num_pres, kernel_size = numpy.shape(training_inputs)

    laplace = laplaceBias(numpy.sqrt(kernel_size), numpy.sqrt(kernel_size))
    rpi = numpy.linalg.pinv(training_inputs.T * training_inputs +
                            __main__.__dict__.get('RPILaplaceBias', 0.0001) *
                            laplace) * training_inputs.T * training_set

    new_training_inputs = numpy.zeros(numpy.shape(training_inputs))
    new_validation_inputs = numpy.zeros(numpy.shape(validation_inputs))

    Ks = numpy.zeros((num_neurons_to_run, kernel_size * 2 + 4))

    second_pred_act = []
    second_pred_val_act = []

    for i in xrange(0, num_neurons_to_run):
        print i
        # project out STA: normalize the RPI filter to unit length first
        a = rpi[:, i] / numpy.sqrt(numpy.sum(numpy.power(rpi[:, i], 2)))

        for j in xrange(0, numpy.shape(training_inputs)[0]):
            new_training_inputs[j, :] = training_inputs[j, :] - a.T * (
                (training_inputs[j, :] * a)[0, 0])

        for j in xrange(0, numpy.shape(validation_inputs)[0]):
            new_validation_inputs[j, :] = validation_inputs[j, :] - a.T * (
                (validation_inputs[j, :] * a)[0, 0])

        k0 = numpy.zeros(
            (1, kernel_size * 2)).flatten().tolist() + [0, 0, 0, 0]
        scm = SimpleContextualModel(numpy.mat(new_training_inputs),
                                    numpy.mat(training_set[:, i]),
                                    laplace,
                                    of1=params["OF"],
                                    of2='Zero')
        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K, success, c) = fmin_tnc(scm.func(),
                                   numpy.array(k0)[:],
                                   fprime=scm.der(),
                                   maxfun=1000,
                                   messages=0)
        Ks[i, :] = K

        second_pred_act.append(
            scm.response(new_training_inputs, numpy.array([K])))
        second_pred_val_act.append(
            scm.response(new_validation_inputs, numpy.array([K])))

    second_pred_act = numpy.hstack(second_pred_act)
    second_pred_val_act = numpy.hstack(second_pred_val_act)

    showRFS(
        numpy.reshape(Ks[:, 0:kernel_size],
                      (-1, int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))))
    release_fig('K1.png')
    showRFS(
        numpy.reshape(Ks[:, kernel_size:2 * kernel_size],
                      (-1, int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))))
    release_fig('K2.png')

    rpi_pred_act = training_inputs * rpi
    rpi_pred_val_act = validation_inputs * rpi

    #ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(rpi_pred_act),num_bins=10,display=True)
    #rpi_pred_act_t = numpy.mat(apply_output_function(numpy.mat(rpi_pred_act),ofs))
    #rpi_pred_val_act_t = numpy.mat(apply_output_function(numpy.mat(rpi_pred_val_act),ofs))

    #ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(second_pred_act),num_bins=10,display=True)
    #second_pred_act_t = numpy.mat(apply_output_function(numpy.mat(second_pred_act),ofs))
    #second_pred_val_act_t = numpy.mat(apply_output_function(numpy.mat(second_pred_val_act),ofs))

    visualize2DOF(rpi_pred_act, numpy.mat(second_pred_act), training_set)

    visualize2DOF(rpi_pred_val_act, numpy.mat(second_pred_val_act),
                  validation_set)

    #pred_act = second_pred_act_t + rpi_pred_act_t #numpy.multiply(second_pred_act_t,rpi_pred_act_t)
    #pred_val_act = second_pred_val_act_t + rpi_pred_val_act_t #numpy.multiply(second_pred_val_act_t,rpi_pred_val_act_t)

    of = fit2DOF(rpi_pred_act, numpy.mat(second_pred_act), training_set)

    pred_act = apply2DOF(rpi_pred_act, numpy.mat(second_pred_act), of)
    pred_val_act = apply2DOF(rpi_pred_val_act, numpy.mat(second_pred_val_act),
                             of)

    compareModelPerformanceWithRPI(training_set, validation_set,
                                   training_inputs, validation_inputs,
                                   numpy.mat(pred_act),
                                   numpy.mat(pred_val_act),
                                   numpy.array(raw_validation_set),
                                   'BilinearModel')
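The "project out STA" step in sequentialFilterFinding removes from every stimulus its component along the neuron's first (RPI) filter, so the second filter fitted on the residual stimuli is forced to capture structure orthogonal to the first. The same projection in isolation, on synthetic data with plain numpy arrays:

import numpy

numpy.random.seed(1)
sta = numpy.random.randn(25)
sta = sta / numpy.sqrt(numpy.sum(sta ** 2))   # unit vector along the first filter

stimuli = numpy.random.randn(100, 25)
# subtract each stimulus's projection onto the STA direction
residual = stimuli - numpy.outer(numpy.dot(stimuli, sta), sta)

print numpy.allclose(numpy.dot(residual, sta), 0)   # True: nothing left along sta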
Example #4
def runSCM():
    res = contrib.dd.loadResults("newest_dataset.dat")
    (sizex, sizey, training_inputs, training_set, validation_inputs,
     validation_set, ff, db_node) = contrib.JanA.dataimport.sortOutLoading(res)
    raw_validation_set = db_node.data["raw_validation_set"]

    params = {}
    params["SCM"] = True
    db_node = db_node.get_child(params)

    params = {}
    params["CLaplacaBias"] = __main__.__dict__.get('CLaplaceBias', 0.0004)
    params["SLaplacaBias"] = __main__.__dict__.get('SLaplaceBias', 0.0004)
    params["OF1"] = __main__.__dict__.get('OF1', 'Exp')
    params["OF2"] = __main__.__dict__.get('OF2', 'Square')
    params["num_neurons"] = __main__.__dict__.get('NumNeurons', 103)

    # create history
    training_set = numpy.mat(training_set)
    validation_set = numpy.mat(validation_set)
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)

    db_node1 = db_node
    db_node = db_node.get_child(params)

    num_pres, num_neurons = numpy.shape(training_set)
    num_pres, kernel_size = numpy.shape(training_inputs)
    num_neurons_to_run = params["num_neurons"]

    Ks = numpy.zeros((num_neurons, kernel_size * 2 + 4))

    print 'Kernel size', kernel_size

    laplace = laplaceBias(numpy.sqrt(kernel_size), numpy.sqrt(kernel_size))

    rpi = numpy.linalg.pinv(training_inputs.T * training_inputs +
                            __main__.__dict__.get('RPILaplaceBias', 0.0001) *
                            laplace) * training_inputs.T * training_set

    for i in xrange(0, num_neurons_to_run):
        print i
        #k0 = rpi[:,i].getA1().tolist() +  numpy.zeros((1,kernel_size)).flatten().tolist() + [0,0]
        k0 = numpy.zeros(
            (1, kernel_size * 2)).flatten().tolist() + [0, 0, 0, 0]
        scm = SimpleContextualModel(numpy.mat(training_inputs),
                                    numpy.mat(training_set[:, i]),
                                    laplace,
                                    of1=params["OF1"],
                                    of2=params["OF2"])

        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K, success, c) = fmin_tnc(scm.func(),
                                   numpy.array(k0)[:],
                                   fprime=scm.der(),
                                   maxfun=10000,
                                   messages=0)
        #print success
        #print c
        Ks[i, :] = K

    pred_act = scm.response(training_inputs, Ks)
    pred_val_act = scm.response(validation_inputs, Ks)

    print Ks[0, :]

    showRFS(
        numpy.reshape(Ks[:, 0:kernel_size],
                      (-1, int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))))
    release_fig('K1.png')
    showRFS(
        numpy.reshape(Ks[:, kernel_size:2 * kernel_size],
                      (-1, int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))))
    release_fig('K2.png')

    compareModelPerformanceWithRPI(
        training_set[:, :num_neurons_to_run],
        validation_set[:, :num_neurons_to_run], training_inputs,
        validation_inputs,
        numpy.mat(pred_act)[:, :num_neurons_to_run],
        numpy.mat(pred_val_act)[:, :num_neurons_to_run],
        numpy.array(raw_validation_set)[:, :, :num_neurons_to_run], 'SCM')

    db_node.add_data("Kernels", Ks, force=True)
    db_node.add_data("GLM", scm, force=True)
Example #5
def fitLSCSMEvo(X,Y,num_lgn,num_neurons_to_estimate):
    num_pres,num_neurons = numpy.shape(Y)
    num_pres,kernel_size = numpy.shape(X)

    Ks = numpy.ones((num_neurons,num_lgn*4+1))

    laplace = laplaceBias(numpy.sqrt(kernel_size),numpy.sqrt(kernel_size))

    setOfAlleles = GAllele.GAlleles()
    bounds = []

    # LGN unit x,y positions, kept away from the image border
    for j in xrange(0,num_lgn):
        setOfAlleles.add(GAllele.GAlleleRange(6,(numpy.sqrt(kernel_size)-6),real=True))
        bounds.append((6,(numpy.sqrt(kernel_size)-6)))
        setOfAlleles.add(GAllele.GAlleleRange(6,(numpy.sqrt(kernel_size)-6),real=True))
        bounds.append((6,(numpy.sqrt(kernel_size)-6)))

    # LGN centre and surround sizes
    for j in xrange(0,num_lgn):
        setOfAlleles.add(GAllele.GAlleleRange(1.0,25,real=True))
        bounds.append((1.0,25))
        setOfAlleles.add(GAllele.GAlleleRange(1.0,25,real=True))
        bounds.append((1.0,25))

    if not __main__.__dict__.get('BalancedLGN',True):
        for j in xrange(0,num_lgn):
            setOfAlleles.add(GAllele.GAlleleRange(0.0,1.0,real=True))
            bounds.append((0.0,1.0))
            setOfAlleles.add(GAllele.GAlleleRange(0.0,1.0,real=True))
            bounds.append((0.0,1.0))

    if __main__.__dict__.get('LGNTreshold',False):
        for j in xrange(0,num_lgn):
            setOfAlleles.add(GAllele.GAlleleRange(0,20,real=True))
            bounds.append((0,20))

    if __main__.__dict__.get('NegativeLgn',True):
        minw = -__main__.__dict__.get('MaxW',5000)
    else:
        minw = 0
    maxw = __main__.__dict__.get('MaxW',5000)
    print __main__.__dict__.get('MaxW',5000)

    # LGN -> cortex (or hidden-layer) weights
    if __main__.__dict__.get('SecondLayer',False):
        for j in xrange(0,num_lgn):
            for k in xrange(0,int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0))):
                setOfAlleles.add(GAllele.GAlleleRange(minw,maxw,real=True))
                bounds.append((minw,maxw))

        # hidden-layer -> output weights
        for j in xrange(0,int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0))):
            for k in xrange(0,num_neurons_to_estimate):
                setOfAlleles.add(GAllele.GAlleleRange(-4,4,real=True))
                bounds.append((-4,4))
    else:
        for j in xrange(0,num_lgn):
            for k in xrange(0,num_neurons_to_estimate):
                setOfAlleles.add(GAllele.GAlleleRange(minw,maxw,real=True))
                bounds.append((minw,maxw))

    # output thresholds
    for k in xrange(0,num_neurons_to_estimate):
        setOfAlleles.add(GAllele.GAlleleRange(0,20,real=True))
        bounds.append((0,20))

    if __main__.__dict__.get('SecondLayer',False):
        # hidden-layer thresholds
        for k in xrange(0,int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0))):
            setOfAlleles.add(GAllele.GAlleleRange(0,20,real=True))
            bounds.append((0,20))

    if __main__.__dict__.get('PyBrain',False):
        ggevo = GGEvoPyBrain(X,Y,num_lgn,num_neurons_to_estimate,bounds)
    else:
        ggevo = GGEvo(X,Y,num_lgn,num_neurons_to_estimate,bounds)

    if __main__.__dict__.get('SecondLayer',False):
        genome_size = num_lgn*4+int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0))*num_lgn+num_neurons_to_estimate
        genome_size += num_neurons_to_estimate*int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0)) + int(num_neurons_to_estimate*__main__.__dict__.get('HiddenLayerSize',1.0))
    else:
        genome_size = num_lgn*4+num_neurons_to_estimate*num_lgn+num_neurons_to_estimate

    if not __main__.__dict__.get('BalancedLGN',True):
        genome_size += num_lgn*2

    if __main__.__dict__.get('LGNTreshold',False):
        genome_size += num_lgn

    print genome_size
    print len(bounds)

    genome = G1DList.G1DList(genome_size)

    genome.setParams(allele=setOfAlleles)
    genome.evaluator.set(ggevo.perform_gradient_descent)
    genome.mutator.set(Mutators.G1DListMutatorAllele)
    genome.initializator.set(Initializators.G1DListInitializatorAllele)
    genome.crossover.set(Crossovers.G1DListCrossoverUniform)

    ga = GSimpleGA.GSimpleGA(genome,__main__.__dict__.get('Seed',1023))
    ga.minimax = Consts.minimaxType["minimize"]
    #ga.selector.set(Selectors.GRouletteWheel)
    ga.setElitism(True)
    ga.setGenerations(__main__.__dict__.get('GenerationSize',100))
    ga.setPopulationSize(__main__.__dict__.get('PopulationSize',100))
    ga.setMutationRate(__main__.__dict__.get('MutationRate',0.05))
    ga.setCrossoverRate(__main__.__dict__.get('CrossoverRate',0.9))

    pop = ga.getPopulation()
    #pop.scaleMethod.set(Scaling.SigmaTruncScaling)

    ga.evolve(freq_stats=1)
    best = ga.bestIndividual()

    #profmode.print_summary()

    # refine the best GA individual with bounded gradient descent (TNC)
    inp = [v for v in best]
    (new_K,success,c)=fmin_tnc(ggevo.func,inp[:],fprime=ggevo.der,bounds=bounds,maxfun = __main__.__dict__.get('FinalNumEval',10000),messages=0)
    #inp[:-1] = numpy.reshape(inp[:-1],(num_lgn,4)).T.flatten()
    print 'Final likelihood'
    print ggevo.func(new_K)
    Ks = new_K
    #rf= ggevo.lscsm.returnRFs(numpy.array([Ks[i,:]]))

    #pylab.figure()
    #m = numpy.max(numpy.abs(rf[0,0:kernel_size]))
    #pylab.imshow(numpy.reshape(rf[0],(numpy.sqrt(kernel_size),numpy.sqrt(kernel_size))),vmin=-m,vmax=m,cmap=pylab.cm.RdBu,interpolation='nearest')
    #pylab.colorbar()
    #pylab.show()
    rfs = ggevo.lscsm.returnRFs(Ks)

    rpi = numpy.linalg.pinv(X.T*X + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * X.T * Y

    return [Ks,rpi,ggevo.lscsm,rfs]
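Each genome in fitLSCSMEvo concatenates, in order: x and y positions for every LGN unit, centre and surround sizes for every LGN unit, the LGN-to-neuron weights, and one output threshold per neuron; the optional flags (BalancedLGN off, LGNTreshold on, SecondLayer on) splice in extra genes. The check below rederives genome_size for the default one-layer case with illustrative values, matching the else branch above:

num_lgn = 9
num_neurons_to_estimate = 50

genome_size = (2 * num_lgn                            # x,y positions
               + 2 * num_lgn                          # centre and surround sizes
               + num_lgn * num_neurons_to_estimate    # LGN -> neuron weights
               + num_neurons_to_estimate)             # output thresholds

assert genome_size == num_lgn * 4 + num_neurons_to_estimate * num_lgn + num_neurons_to_estimate
print genome_size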
Example #6
def compareModelPerformanceWithRPI(training_set,validation_set,training_inputs,validation_inputs,pred_act,pred_val_act,raw_validation_set,sizex,sizey,modelname='Model'):
    from contrib.JanA.regression import laplaceBias

    num_neurons = numpy.shape(pred_act)[1]

    kernel_size = numpy.shape(validation_inputs)[1]
    laplace = laplaceBias(sizex,sizey)
    X = numpy.mat(training_inputs)
    rpi = numpy.linalg.pinv(X.T*X + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * X.T * training_set
    rpi_pred_act = training_inputs * rpi
    rpi_pred_val_act = validation_inputs * rpi

    showRFS(numpy.reshape(numpy.array(rpi.T),(-1,sizex,sizey)))

    print numpy.shape(numpy.mat(training_set))
    print numpy.shape(numpy.mat(pred_act))
    ofs = contrib.JanA.ofestimation.run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(pred_act),num_bins=10,display=True,name=(modelname+'_piece_wise_nonlinearity.png'))
    pred_act_t = numpy.mat(contrib.JanA.ofestimation.apply_output_function(numpy.mat(pred_act),ofs))
    pred_val_act_t = numpy.mat(contrib.JanA.ofestimation.apply_output_function(numpy.mat(pred_val_act),ofs))

    ofs = contrib.JanA.ofestimation.run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(rpi_pred_act),num_bins=10,display=True,name='RPI_piece_wise_nonlinearity.png')
    rpi_pred_act_t = numpy.mat(contrib.JanA.ofestimation.apply_output_function(numpy.mat(rpi_pred_act),ofs))
    rpi_pred_val_act_t = numpy.mat(contrib.JanA.ofestimation.apply_output_function(numpy.mat(rpi_pred_val_act),ofs))

    pylab.figure()
    pylab.title('RPI')
    for i in xrange(0,num_neurons):
        pylab.subplot(11,11,i+1)
        pylab.plot(rpi_pred_val_act[:,i],validation_set[:,i],'o')
    contrib.modelfit.release_fig('RPI_val_relationship.png')

    pylab.figure()
    pylab.title(modelname)
    for i in xrange(0,num_neurons):
        pylab.subplot(11,11,i+1)
        pylab.plot(pred_val_act[:,i],validation_set[:,i],'o')
    contrib.modelfit.release_fig('GLM_val_relationship.png')

    pylab.figure()
    pylab.title('RPI')
    for i in xrange(0,num_neurons):
        pylab.subplot(11,11,i+1)
        pylab.plot(rpi_pred_val_act_t[:,i],validation_set[:,i],'o')
    contrib.modelfit.release_fig('RPI_t_val_relationship.png')

    pylab.figure()
    pylab.title(modelname)
    for i in xrange(0,num_neurons):
        pylab.subplot(11,11,i+1)
        pylab.plot(pred_val_act_t[:,i],validation_set[:,i],'o')
    contrib.modelfit.release_fig('GLM_t_val_relationship.png')

    pylab.figure()
    print numpy.shape(numpy.mean(numpy.power(validation_set - rpi_pred_val_act_t,2)[:,:num_neurons],0))
    print numpy.shape(numpy.mean(numpy.power(validation_set - pred_val_act,2)[:,:num_neurons],0))
    pylab.plot(numpy.mean(numpy.power(validation_set - rpi_pred_val_act_t,2)[:,:num_neurons],0),numpy.mean(numpy.power(validation_set - pred_val_act,2)[:,:num_neurons],0),'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname)
    contrib.modelfit.release_fig('GLM_vs_RPI_MSE.png')

    print '\n \n RPI \n'

    print 'Without TF'
    contrib.modelfit.performance_analysis(training_set,validation_set,rpi_pred_act,rpi_pred_val_act,raw_validation_set,85)
    print 'With TF'
    (signal_power,noise_power,normalized_noise_power,training_prediction_power,rpi_validation_prediction_power,signal_power_variance) = contrib.modelfit.performance_analysis(training_set,validation_set,rpi_pred_act_t,rpi_pred_val_act_t,raw_validation_set,85)

    print '\n \n', modelname, '\n'

    print 'Without TF'
    (signal_power,noise_power,normalized_noise_power,training_prediction_power,validation_prediction_power,signal_power_variance) = contrib.modelfit.performance_analysis(training_set,validation_set,pred_act,pred_val_act,raw_validation_set,85)
    print 'With TF'
    (signal_power_t,noise_power_t,normalized_noise_power_t,training_prediction_power_t,validation_prediction_power_t,signal_power_variance_t) = contrib.modelfit.performance_analysis(training_set,validation_set,pred_act_t,pred_val_act_t,raw_validation_set,85)

    significant = numpy.array(numpy.nonzero((numpy.array(normalized_noise_power) < 85) * 1.0))[0]

    print significant

    pylab.figure()
    pylab.plot(rpi_validation_prediction_power[significant],validation_prediction_power[significant],'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname)
    contrib.modelfit.release_fig('GLM_vs_RPI_prediction_power.png')

    pylab.figure()
    pylab.plot(rpi_validation_prediction_power[significant],validation_prediction_power_t[significant],'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname+'+TF')
    contrib.modelfit.release_fig('GLM_vs_RPI_prediction_power.png')
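The GLM_vs_RPI_MSE.png scatter above puts one point per neuron, with the RPI baseline's validation MSE against the model's MSE, so points below the identity line favor the model. The core of that comparison on synthetic data:

import numpy

numpy.random.seed(2)
validation_set = numpy.random.rand(50, 10)                    # hypothetical measured responses
rpi_pred = validation_set + 0.2 * numpy.random.randn(50, 10)
model_pred = validation_set + 0.1 * numpy.random.randn(50, 10)

rpi_mse = numpy.mean(numpy.power(validation_set - rpi_pred, 2), 0)
model_mse = numpy.mean(numpy.power(validation_set - model_pred, 2), 0)

print numpy.mean(model_mse < rpi_mse)   # fraction of neurons where the model wins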
Example #7
def runCNM():
    res = contrib.dd.loadResults("newest_dataset.dat")
    (sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = contrib.JanA.dataimport.sortOutLoading(res)
    raw_validation_set = db_node.data["raw_validation_set"]

    params={}
    params["SCM"]=True
    db_node = db_node.get_child(params)

    params={}
    params["LaplacaBias"] = __main__.__dict__.get('LaplaceBias',0.0004)
    params["OFAff"] = __main__.__dict__.get('OFAff','Exp')
    params["OFSurr"] = __main__.__dict__.get('OFSurr','Linear')
    params["num_neurons"] = __main__.__dict__.get('NumNeurons',103)

    significant = [0,1,6,7,8,9,12,13,15,16,17,19,20,21,22,23,24,25,26,27,28,30,31,32,33,34,38,39,40,42,43,45,46,47,48,49,50,51,52,53,56,58,59,61,62,63,64,65,66,69,71,72,75,77,78,79,80,83,85,89,91,92,93,94,96,100,101,102]
    # create history
    training_set = numpy.mat(training_set)[:,significant]
    validation_set = numpy.mat(validation_set)[:,significant]
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)

    for i in xrange(0,len(raw_validation_set)):
        raw_validation_set[i] = numpy.mat(raw_validation_set[i])[:,significant]

    db_node1 = db_node
    db_node = db_node.get_child(params)

    num_pres,num_neurons = numpy.shape(training_set)
    num_pres,kernel_size = numpy.shape(training_inputs)
    num_neurons_to_run = params["num_neurons"]

    Ks = numpy.zeros((num_neurons,kernel_size+7))

    print 'Kernel size',kernel_size

    laplace = laplaceBias(sizex,sizey)

    rpi = numpy.linalg.pinv(training_inputs.T*training_inputs + __main__.__dict__.get('RPILaplaceBias',0.0004)*laplace) * training_inputs.T * training_set

    bounds = []

    for i in xrange(0,kernel_size):
        bounds.append((-1000000000,10000000000))

    bounds = [(6,26),(6,25),(1,1000000),(0.0,500000),(0.001,100000),(-100,100),(-100,100)] + bounds

    for i in xrange(0,num_neurons_to_run):
        print i
        # 7 scalar parameters followed by the kernel, initialized from the RPI filter
        k0 = [15,15,20,100000,1,0,0] + rpi[:,i].getA1().tolist()

        print numpy.shape(k0)
        print sizex,sizey
        scm = ContrastNormalizationModel(training_inputs,numpy.mat(training_set[:,i]),laplace,sizex,sizey,of_aff=params["OFAff"],of_surr=params["OFSurr"])

        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K,success,c)=fmin_tnc(scm.func(),numpy.array(k0)[:],fprime=scm.der(),bounds = bounds,maxfun = 100000,messages=0)
        print scm.func()(K)
        Ks[i,:] = K

    pred_act = scm.response(training_inputs,Ks)
    pred_val_act = scm.response(validation_inputs,Ks)

    from contrib.JanA.sparsness_analysis import TrevesRollsSparsness

    showRFS(numpy.reshape(numpy.array(rpi.T),(-1,sizex,sizey)))

    showRFS(numpy.reshape(Ks[:,7:kernel_size+7],(-1,sizex,sizey)))

    print Ks[:,:7]

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act.T)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set.T)).flatten())

    compareModelPerformanceWithRPI(training_set[:,:num_neurons_to_run],validation_set[:,:num_neurons_to_run],training_inputs,validation_inputs,numpy.mat(pred_act)[:,:num_neurons_to_run],numpy.mat(pred_val_act)[:,:num_neurons_to_run],numpy.array(raw_validation_set)[:,:,:num_neurons_to_run],sizex,sizey,'SCM')

    db_node.add_data("Kernels",Ks,force=True)
    db_node.add_data("GLM",scm,force=True)
Example #8
def sequentialFilterFinding():
    d = contrib.dd.loadResults("newest_dataset.dat")
    (sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = sortOutLoading(d)
    raw_validation_set = db_node.data["raw_validation_set"]
    contrib.modelfit.save_fig_directory = '/home/antolikjan/Doc/reports/Sparsness/SequentialFilterFitting/'

    params={}
    params["SequentialFilterFitting"]=True
    db_node = db_node.get_child(params)

    params={}
    params["alpha"] = __main__.__dict__.get('Alpha',0.02)
    params["num_neurons"] = __main__.__dict__.get('NumNeurons',10)
    params["OF"] = __main__.__dict__.get('OF','Square')
    db_node = db_node.get_child(params)

    num_neurons_to_run = params["num_neurons"]

    training_set = numpy.mat(training_set)[:,0:num_neurons_to_run]
    validation_set = numpy.mat(validation_set)[:,0:num_neurons_to_run]
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)
    raw_validation_set = numpy.array(raw_validation_set)[:,:,0:num_neurons_to_run]

    num_pres,kernel_size = numpy.shape(training_inputs)

    laplace = laplaceBias(numpy.sqrt(kernel_size),numpy.sqrt(kernel_size))
    rpi = numpy.linalg.pinv(training_inputs.T*training_inputs + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * training_inputs.T * training_set

    new_training_inputs = numpy.zeros(numpy.shape(training_inputs))
    new_validation_inputs = numpy.zeros(numpy.shape(validation_inputs))

    Ks = numpy.zeros((num_neurons_to_run,kernel_size*2+4))

    second_pred_act=[]
    second_pred_val_act=[]

    for i in xrange(0,num_neurons_to_run):
        print i
        # project out STA: normalize the RPI filter to unit length first
        a = rpi[:,i]/numpy.sqrt(numpy.sum(numpy.power(rpi[:,i],2)))

        for j in xrange(0,numpy.shape(training_inputs)[0]):
            new_training_inputs[j,:] = training_inputs[j,:] - a.T * ((training_inputs[j,:]*a)[0,0])

        for j in xrange(0,numpy.shape(validation_inputs)[0]):
            new_validation_inputs[j,:] = validation_inputs[j,:] - a.T * ((validation_inputs[j,:]*a)[0,0])

        k0 = numpy.zeros((1,kernel_size*2)).flatten().tolist() + [0,0,0,0]
        scm = SimpleContextualModel(numpy.mat(new_training_inputs),numpy.mat(training_set[:,i]),laplace,of1=params["OF"],of2='Zero')
        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K,success,c)=fmin_tnc(scm.func(),numpy.array(k0)[:],fprime=scm.der(),maxfun = 1000,messages=0)
        Ks[i,:] = K

        second_pred_act.append(scm.response(new_training_inputs,numpy.array([K])))
        second_pred_val_act.append(scm.response(new_validation_inputs,numpy.array([K])))

    second_pred_act = numpy.hstack(second_pred_act)
    second_pred_val_act = numpy.hstack(second_pred_val_act)

    showRFS(numpy.reshape(Ks[:,0:kernel_size],(-1,int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))))
    release_fig('K1.png')
    showRFS(numpy.reshape(Ks[:,kernel_size:2*kernel_size],(-1,int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))))
    release_fig('K2.png')

    rpi_pred_act = training_inputs * rpi
    rpi_pred_val_act = validation_inputs * rpi

    #ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(rpi_pred_act),num_bins=10,display=True)
    #rpi_pred_act_t = numpy.mat(apply_output_function(numpy.mat(rpi_pred_act),ofs))
    #rpi_pred_val_act_t = numpy.mat(apply_output_function(numpy.mat(rpi_pred_val_act),ofs))

    #ofs = run_nonlinearity_detection(numpy.mat(training_set),numpy.mat(second_pred_act),num_bins=10,display=True)
    #second_pred_act_t = numpy.mat(apply_output_function(numpy.mat(second_pred_act),ofs))
    #second_pred_val_act_t = numpy.mat(apply_output_function(numpy.mat(second_pred_val_act),ofs))

    visualize2DOF(rpi_pred_act,numpy.mat(second_pred_act),training_set)

    visualize2DOF(rpi_pred_val_act,numpy.mat(second_pred_val_act),validation_set)

    #pred_act = second_pred_act_t + rpi_pred_act_t #numpy.multiply(second_pred_act_t,rpi_pred_act_t)
    #pred_val_act = second_pred_val_act_t + rpi_pred_val_act_t #numpy.multiply(second_pred_val_act_t,rpi_pred_val_act_t)

    of = fit2DOF(rpi_pred_act,numpy.mat(second_pred_act),training_set)

    pred_act = apply2DOF(rpi_pred_act,numpy.mat(second_pred_act),of)
    pred_val_act = apply2DOF(rpi_pred_val_act,numpy.mat(second_pred_val_act),of)

    compareModelPerformanceWithRPI(training_set,validation_set,training_inputs,validation_inputs,numpy.mat(pred_act),numpy.mat(pred_val_act),numpy.array(raw_validation_set),'BilinearModel')
Example #9
def compareModelPerformanceWithRPI(training_set,
                                   validation_set,
                                   training_inputs,
                                   validation_inputs,
                                   pred_act,
                                   pred_val_act,
                                   raw_validation_set,
                                   sizex,
                                   sizey,
                                   modelname='Model'):
    from contrib.JanA.regression import laplaceBias

    num_neurons = numpy.shape(pred_act)[1]

    kernel_size = numpy.shape(validation_inputs)[1]
    laplace = laplaceBias(sizex, sizey)
    X = numpy.mat(training_inputs)
    rpi = numpy.linalg.pinv(X.T * X +
                            __main__.__dict__.get('RPILaplaceBias', 0.0001) *
                            laplace) * X.T * training_set
    rpi_pred_act = training_inputs * rpi
    rpi_pred_val_act = validation_inputs * rpi

    showRFS(numpy.reshape(numpy.array(rpi.T), (-1, sizex, sizey)))

    print numpy.shape(numpy.mat(training_set))
    print numpy.shape(numpy.mat(pred_act))
    ofs = contrib.JanA.ofestimation.run_nonlinearity_detection(
        numpy.mat(training_set),
        numpy.mat(pred_act),
        num_bins=10,
        display=True,
        name=(modelname + '_piece_wise_nonlinearity.png'))
    pred_act_t = numpy.mat(
        contrib.JanA.ofestimation.apply_output_function(
            numpy.mat(pred_act), ofs))
    pred_val_act_t = numpy.mat(
        contrib.JanA.ofestimation.apply_output_function(
            numpy.mat(pred_val_act), ofs))

    ofs = contrib.JanA.ofestimation.run_nonlinearity_detection(
        numpy.mat(training_set),
        numpy.mat(rpi_pred_act),
        num_bins=10,
        display=True,
        name='RPI_piece_wise_nonlinearity.png')
    rpi_pred_act_t = numpy.mat(
        contrib.JanA.ofestimation.apply_output_function(
            numpy.mat(rpi_pred_act), ofs))
    rpi_pred_val_act_t = numpy.mat(
        contrib.JanA.ofestimation.apply_output_function(
            numpy.mat(rpi_pred_val_act), ofs))

    pylab.figure()
    pylab.title('RPI')
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(rpi_pred_val_act[:, i], validation_set[:, i], 'o')
    contrib.modelfit.release_fig('RPI_val_relationship.png')

    pylab.figure()
    pylab.title(modelname)
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(pred_val_act[:, i], validation_set[:, i], 'o')
    contrib.modelfit.release_fig('GLM_val_relationship.png')

    pylab.figure()
    pylab.title('RPI')
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(rpi_pred_val_act_t[:, i], validation_set[:, i], 'o')
    contrib.modelfit.release_fig('RPI_t_val_relationship.png')

    pylab.figure()
    pylab.title(modelname)
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(pred_val_act_t[:, i], validation_set[:, i], 'o')
    contrib.modelfit.release_fig('GLM_t_val_relationship.png')

    pylab.figure()
    print numpy.shape(
        numpy.mean(
            numpy.power(validation_set - rpi_pred_val_act_t,
                        2)[:, :num_neurons], 0))
    print numpy.shape(
        numpy.mean(
            numpy.power(validation_set - pred_val_act, 2)[:, :num_neurons], 0))
    pylab.plot(
        numpy.mean(
            numpy.power(validation_set - rpi_pred_val_act_t,
                        2)[:, :num_neurons], 0),
        numpy.mean(
            numpy.power(validation_set - pred_val_act, 2)[:, :num_neurons], 0),
        'o')
    pylab.hold(True)
    pylab.plot([0.0, 1.0], [0.0, 1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname)
    contrib.modelfit.release_fig('GLM_vs_RPI_MSE.png')

    print '\n \n RPI \n'

    print 'Without TF'
    contrib.modelfit.performance_analysis(training_set, validation_set,
                                          rpi_pred_act, rpi_pred_val_act,
                                          raw_validation_set, 85)
    print 'With TF'
    (signal_power, noise_power, normalized_noise_power,
     training_prediction_power, rpi_validation_prediction_power,
     signal_power_variance) = contrib.modelfit.performance_analysis(
         training_set, validation_set, rpi_pred_act_t, rpi_pred_val_act_t,
         raw_validation_set, 85)

    print '\n \n', modelname, '\n'

    print 'Without TF'
    (signal_power, noise_power, normalized_noise_power,
     training_prediction_power, validation_prediction_power,
     signal_power_variance) = contrib.modelfit.performance_analysis(
         training_set, validation_set, pred_act, pred_val_act,
         raw_validation_set, 85)
    print 'With TF'
    (signal_power_t, noise_power_t, normalized_noise_power_t,
     training_prediction_power_t, validation_prediction_power_t,
     signal_power_variance_t) = contrib.modelfit.performance_analysis(
         training_set, validation_set, pred_act_t, pred_val_act_t,
         raw_validation_set, 85)

    significant = numpy.array(
        numpy.nonzero((numpy.array(normalized_noise_power) < 85) * 1.0))[0]

    print significant

    pylab.figure()
    pylab.plot(rpi_validation_prediction_power[significant],
               validation_prediction_power[significant], 'o')
    pylab.hold(True)
    pylab.plot([0.0, 1.0], [0.0, 1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname)
    contrib.modelfit.release_fig('GLM_vs_RPI_prediction_power.png')

    pylab.figure()
    pylab.plot(rpi_validation_prediction_power[significant],
               validation_prediction_power_t[significant], 'o')
    pylab.hold(True)
    pylab.plot([0.0, 1.0], [0.0, 1.0])
    pylab.xlabel('RPI')
    pylab.ylabel(modelname + '+TF')
    contrib.modelfit.release_fig('GLM_vs_RPI_prediction_power.png')
Example #10
def analyseStoredGLM():
    from copy import deepcopy
    dataset = contrib.JanA.dataimport.loadSimpleDataSet('Mice/2009_11_04/Raw/region3/val/spiking_13-15.dat',50,103,num_rep=10,num_frames=1,offset=0,transpose=False)
    rr = []
    (index,raw_val_set) = dataset
    for i in xrange(0,10):
        rr.append(contrib.JanA.dataimport.generateTrainingSet(contrib.JanA.dataimport.averageRepetitions((index,deepcopy(raw_val_set)),reps=[i])))
    raw_history_validation_set = rr

    res = contrib.dd.loadResults("results.dat")
    node = res.children[0].children[3]

    training_set = node.data["training_set"][1:,:]
    validation_set = node.data["validation_set"][1:,:]
    training_inputs = node.data["training_inputs"][1:,:]
    validation_inputs = node.data["validation_inputs"][1:,:]
    raw_validation_set = node.data["raw_validation_set"]

    for i in xrange(0,len(raw_validation_set)):
        raw_history_validation_set[i] = raw_history_validation_set[i][0:-1,:]

    K = node.children[10].data["Kernels"]
    glm = node.children[10].data["GLM"]
    history_set = node.children[10].data["HistorySet"]
    history_validation_set = node.children[10].data["HistoryValidationSet"]

    print training_inputs
    print training_set

    rpi = numpy.linalg.pinv(numpy.mat(training_inputs).T*numpy.mat(training_inputs) + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplaceBias(numpy.sqrt(numpy.shape(training_inputs)[1]),numpy.sqrt(numpy.shape(training_inputs)[1]))) * numpy.mat(training_inputs).T * numpy.mat(training_set)

    analyseGLM(K,rpi,glm,validation_inputs,training_inputs,validation_set,training_set,raw_validation_set,history_set,history_validation_set,raw_history_validation_set,contrib.dd.DB(None),numpy.shape(training_set)[1])
Example #11
num_pres,num_neurons = numpy.shape(training_set)
num_pres,kernel_size = numpy.shape(training_inputs)
size = numpy.sqrt(kernel_size)

raw_validation_data_set=numpy.rollaxis(numpy.array(raw_validation_set),2)

(num_pres,num_neurons) = numpy.shape(training_set)

testing_set = training_set[-int(0.1*num_pres):,:]
training_set = training_set[:-int(0.1*num_pres),:]

testing_inputs = training_inputs[-int(0.1*num_pres):,:]
training_inputs = training_inputs[:-int(0.1*num_pres),:]

kernel_size =  numpy.shape(numpy.mat(training_inputs))[1]
laplace = laplaceBias(int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))
a = 0.0000000001

ITER = 40

rpis = []
for i in xrange(0,ITER):
    print a
    a = a*2
    rpis.append(numpy.linalg.pinv(numpy.mat(training_inputs).T*numpy.mat(training_inputs) + a*laplace) * numpy.mat(training_inputs).T * numpy.mat(training_set))
    

# these will hold the best lambda and RFs for each neuron
best_rpis = numpy.mat(numpy.zeros((kernel_size,num_neurons)))
best_lambda = numpy.mat(numpy.zeros((1,num_neurons)))
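The snippet stops before the selection step, but the natural continuation is to score every lambda on the held-out testing split and keep, per neuron, the filter with the lowest testing MSE. A sketch of that selection, reusing the variables defined above and assuming testing-set MSE is the criterion (the source does not show this part):

errors = numpy.zeros((ITER,num_neurons))
for i in xrange(0,ITER):
    pred = numpy.mat(testing_inputs) * rpis[i]
    errors[i,:] = numpy.asarray(numpy.mean(numpy.power(numpy.mat(testing_set) - pred,2),0)).flatten()

for n in xrange(0,num_neurons):
    best = numpy.argmin(errors[:,n])
    best_rpis[:,n] = rpis[best][:,n]
    best_lambda[0,n] = 0.0000000001 * 2 ** (best + 1)   # the lambda used for rpis[best]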
Example #12
raw_validation_set = dataset_node.data["raw_validation_set"]
training_inputs= dataset_node.data["training_inputs"]
validation_inputs= dataset_node.data["validation_inputs"]

(num_pres,num_neurons) = numpy.shape(training_set)

testing_set = training_set[-int(0.1*num_pres):,:]
training_set = training_set[:-int(0.1*num_pres),:]

testing_inputs = training_inputs[-int(0.1*num_pres):,:]
training_inputs = training_inputs[:-int(0.1*num_pres),:]

kernel_size =  numpy.shape(numpy.mat(training_inputs))[1]


laplace = laplaceBias(int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))
a = 0.0000000001

rpis = []
for i in xrange(0,40):
    print a
    a = a*2
    rpis.append(numpy.linalg.pinv(numpy.mat(training_inputs).T*numpy.mat(training_inputs) + a*laplace) * numpy.mat(training_inputs).T * numpy.mat(training_set))
    

best_rpis = numpy.mat(numpy.zeros((kernel_size,num_neurons)))
best_lambda = numpy.mat(numpy.zeros((1,num_neurons)))
import pylab
pylab.figure()

a = [1e-10,2e-10,4e-10,8e-10,1.6e-09,3.2e-09,6.4e-09,1.28e-08,2.56e-08,5.12e-08,1.024e-07,2.048e-07,4.096e-07,8.192e-07,1.6384e-06,3.2768e-06,6.5536e-06,1.31072e-05,2.62144e-05,5.24288e-05,0.0001048576,0.0002097152,0.0004194304,0.0008388608,0.0016777216,0.0033554432,0.0067108864,0.0134217728,0.0268435456,0.0536870912,0.1073741824,0.2147483648,0.4294967296,0.8589934592,1.7179869184,3.4359738368,6.8719476736,13.7438953472,27.4877906944,54.9755813888]
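The hard-coded list a is the doubling ladder that the earlier loop walks through, a_i = 1e-10 * 2**i for i = 0..39 (the third entry is corrected to 4e-10 above, since every element doubles its predecessor). It can be regenerated instead of typed out:

a = [1e-10 * 2 ** i for i in xrange(0, 40)]
print a[0], a[1], a[2], a[-1]   # 1e-10 2e-10 4e-10 54.9755813888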
Example #13
def runCNM():
    res = contrib.dd.loadResults("newest_dataset.dat")
    (sizex, sizey, training_inputs, training_set, validation_inputs,
     validation_set, ff, db_node) = contrib.JanA.dataimport.sortOutLoading(res)
    raw_validation_set = db_node.data["raw_validation_set"]

    params = {}
    params["SCM"] = True
    db_node = db_node.get_child(params)

    params = {}
    params["LaplacaBias"] = __main__.__dict__.get('LaplaceBias', 0.0004)
    params["OFAff"] = __main__.__dict__.get('OFAff', 'Exp')
    params["OFSurr"] = __main__.__dict__.get('OFSurr', 'Linear')
    params["num_neurons"] = __main__.__dict__.get('NumNeurons', 103)

    significant = [
        0, 1, 6, 7, 8, 9, 12, 13, 15, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26,
        27, 28, 30, 31, 32, 33, 34, 38, 39, 40, 42, 43, 45, 46, 47, 48, 49, 50,
        51, 52, 53, 56, 58, 59, 61, 62, 63, 64, 65, 66, 69, 71, 72, 75, 77, 78,
        79, 80, 83, 85, 89, 91, 92, 93, 94, 96, 100, 101, 102
    ]
    # create history
    training_set = numpy.mat(training_set)[:, significant]
    validation_set = numpy.mat(validation_set)[:, significant]
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)

    for i in xrange(0, len(raw_validation_set)):
        raw_validation_set[i] = numpy.mat(raw_validation_set[i])[:,
                                                                 significant]

    db_node1 = db_node
    db_node = db_node.get_child(params)

    num_pres, num_neurons = numpy.shape(training_set)
    num_pres, kernel_size = numpy.shape(training_inputs)
    num_neurons_to_run = params["num_neurons"]

    Ks = numpy.zeros((num_neurons, kernel_size + 7))

    print 'Kernel size', kernel_size

    laplace = laplaceBias(sizex, sizey)

    rpi = numpy.linalg.pinv(training_inputs.T * training_inputs +
                            __main__.__dict__.get('RPILaplaceBias', 0.0004) *
                            laplace) * training_inputs.T * training_set

    bounds = []

    for i in xrange(0, kernel_size):
        bounds.append((-1000000000, 10000000000))

    bounds = [(6, 26), (6, 25), (1, 1000000), (0.0, 500000), (0.001, 100000),
              (-100, 100), (-100, 100)] + bounds

    for i in xrange(0, num_neurons_to_run):
        print i
        k0 = [15, 15, 20, 100000, 1, 0, 0] + rpi[:, i].getA1().tolist()

        print numpy.shape(k0)
        print sizex, sizey
        scm = ContrastNormalizationModel(training_inputs,
                                         numpy.mat(training_set[:, i]),
                                         laplace,
                                         sizex,
                                         sizey,
                                         of_aff=params["OFAff"],
                                         of_surr=params["OFSurr"])

        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K, success, c) = fmin_tnc(scm.func(),
                                   numpy.array(k0)[:],
                                   fprime=scm.der(),
                                   bounds=bounds,
                                   maxfun=100000,
                                   messages=0)
        print scm.func()(K)
        Ks[i, :] = K

    pred_act = scm.response(training_inputs, Ks)
    pred_val_act = scm.response(validation_inputs, Ks)

    from contrib.JanA.sparsness_analysis import TrevesRollsSparsness

    showRFS(numpy.reshape(numpy.array(rpi.T), (-1, sizex, sizey)))

    showRFS(numpy.reshape(Ks[:, 7:kernel_size + 7], (-1, sizex, sizey)))

    print Ks[:, :7]

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act.T)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set)).flatten())

    pylab.figure()
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set.T)).flatten())

    compareModelPerformanceWithRPI(
        training_set[:, :num_neurons_to_run],
        validation_set[:, :num_neurons_to_run], training_inputs,
        validation_inputs,
        numpy.mat(pred_act)[:, :num_neurons_to_run],
        numpy.mat(pred_val_act)[:, :num_neurons_to_run],
        numpy.array(raw_validation_set)[:, :, :num_neurons_to_run], sizex,
        sizey, 'SCM')

    db_node.add_data("Kernels", Ks, force=True)
    db_node.add_data("GLM", scm, force=True)
Example #14
def runSCM():
    res = contrib.dd.loadResults("newest_dataset.dat")
    (sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = contrib.JanA.dataimport.sortOutLoading(res)
    raw_validation_set = db_node.data["raw_validation_set"]

    params={}
    params["SCM"]=True
    db_node = db_node.get_child(params)

    params={}
    params["CLaplacaBias"] = __main__.__dict__.get('CLaplaceBias',0.0004)
    params["SLaplacaBias"] = __main__.__dict__.get('SLaplaceBias',0.0004)
    params["OF1"] = __main__.__dict__.get('OF1','Exp')
    params["OF2"] = __main__.__dict__.get('OF2','Square')
    params["num_neurons"] = __main__.__dict__.get('NumNeurons',103)

    # create history
    training_set = numpy.mat(training_set)
    validation_set = numpy.mat(validation_set)
    training_inputs = numpy.mat(training_inputs)
    validation_inputs = numpy.mat(validation_inputs)

    db_node1 = db_node
    db_node = db_node.get_child(params)

    num_pres,num_neurons = numpy.shape(training_set)
    num_pres,kernel_size = numpy.shape(training_inputs)
    num_neurons_to_run = params["num_neurons"]

    Ks = numpy.zeros((num_neurons,kernel_size*2+4))

    print 'Kernel size',kernel_size

    laplace = laplaceBias(numpy.sqrt(kernel_size),numpy.sqrt(kernel_size))

    rpi = numpy.linalg.pinv(training_inputs.T*training_inputs + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * training_inputs.T * training_set

    for i in xrange(0,num_neurons_to_run):
        print i
        #k0 = rpi[:,i].getA1().tolist() +  numpy.zeros((1,kernel_size)).flatten().tolist() + [0,0]
        k0 = numpy.zeros((1,kernel_size*2)).flatten().tolist() + [0,0,0,0]
        scm = SimpleContextualModel(numpy.mat(training_inputs),numpy.mat(training_set[:,i]),laplace,of1=params["OF1"],of2=params["OF2"])

        #K = fmin_ncg(scm.func(),numpy.array(k0),scm.der(),fhess = scm.hess(),avextol=0.0000001,maxiter=20)
        (K,success,c)=fmin_tnc(scm.func(),numpy.array(k0)[:],fprime=scm.der(),maxfun = 10000,messages=0)
        #print success
        #print c
        Ks[i,:] = K

    pred_act = scm.response(training_inputs,Ks)
    pred_val_act = scm.response(validation_inputs,Ks)

    print Ks[0,:]

    showRFS(numpy.reshape(Ks[:,0:kernel_size],(-1,int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))))
    release_fig('K1.png')
    showRFS(numpy.reshape(Ks[:,kernel_size:2*kernel_size],(-1,int(numpy.sqrt(kernel_size)),int(numpy.sqrt(kernel_size)))))
    release_fig('K2.png')

    compareModelPerformanceWithRPI(training_set[:,:num_neurons_to_run],validation_set[:,:num_neurons_to_run],training_inputs,validation_inputs,numpy.mat(pred_act)[:,:num_neurons_to_run],numpy.mat(pred_val_act)[:,:num_neurons_to_run],numpy.array(raw_validation_set)[:,:,:num_neurons_to_run],'SCM')

    db_node.add_data("Kernels",Ks,force=True)
    db_node.add_data("GLM",scm,force=True)
Example #15
def analyseStoredGLM():
    from copy import deepcopy

    dataset = contrib.JanA.dataimport.loadSimpleDataSet(
        "Mice/2009_11_04/Raw/region3/val/spiking_13-15.dat",
        50,
        103,
        num_rep=10,
        num_frames=1,
        offset=0,
        transpose=False,
    )
    rr = []
    (index, raw_val_set) = dataset
    for i in xrange(0, 10):
        rr.append(
            contrib.JanA.dataimport.generateTrainingSet(
                contrib.JanA.dataimport.averageRepetitions((index, deepcopy(raw_val_set)), reps=[i])
            )
        )
    raw_history_validation_set = rr

    res = contrib.dd.loadResults("results.dat")
    node = res.children[0].children[3]

    training_set = node.data["training_set"][1:, :]
    validation_set = node.data["validation_set"][1:, :]
    training_inputs = node.data["training_inputs"][1:, :]
    validation_inputs = node.data["validation_inputs"][1:, :]
    raw_validation_set = node.data["raw_validation_set"]

    for i in xrange(0, len(raw_validation_set)):
        raw_history_validation_set[i] = raw_history_validation_set[i][0:-1, :]

    K = node.children[10].data["Kernels"]
    glm = node.children[10].data["GLM"]
    history_set = node.children[10].data["HistorySet"]
    history_validation_set = node.children[10].data["HistoryValidationSet"]

    print training_inputs
    print training_set

    rpi = (
        numpy.linalg.pinv(
            numpy.mat(training_inputs).T * numpy.mat(training_inputs)
            + __main__.__dict__.get("RPILaplaceBias", 0.0001)
            * laplaceBias(numpy.sqrt(numpy.shape(training_inputs)[1]), numpy.sqrt(numpy.shape(training_inputs)[1]))
        )
        * numpy.mat(training_inputs).T
        * numpy.mat(training_set)
    )

    analyseGLM(
        K,
        rpi,
        glm,
        validation_inputs,
        training_inputs,
        validation_set,
        training_set,
        raw_validation_set,
        history_set,
        history_validation_set,
        raw_history_validation_set,
        contrib.dd.DB(None),
        numpy.shape(training_set)[1],
    )
Example #16
raw_validation_set = dataset_node.data["raw_validation_set"]
training_inputs = dataset_node.data["training_inputs"]
validation_inputs = dataset_node.data["validation_inputs"]

(num_pres, num_neurons) = numpy.shape(training_set)

testing_set = training_set[-int(0.1 * num_pres):, :]
training_set = training_set[:-int(0.1 * num_pres), :]

testing_inputs = training_inputs[-int(0.1 * num_pres):, :]
training_inputs = training_inputs[:-int(0.1 * num_pres), :]

kernel_size = numpy.shape(numpy.mat(training_inputs))[1]


laplace = laplaceBias(int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))
a = 0.0000000001

rpis = []
for i in xrange(0, 40):
    print a
    a = a * 2
    rpis.append(
        numpy.linalg.pinv(numpy.mat(training_inputs).T * numpy.mat(training_inputs) + a * laplace)
        * numpy.mat(training_inputs).T
        * numpy.mat(training_set)
    )


best_rpis = numpy.mat(numpy.zeros((kernel_size, num_neurons)))
best_lambda = numpy.mat(numpy.zeros((1, num_neurons)))
Example #17
def fetch_data():
    dirr= "/home/antolikjan/eddie_data/LSCSM_SEQ_LGN5_LLTrue_SL=True_LinearLGN/"
    reg=re.compile(".*FromWhichNeuron\=([^,]*)")
    import contrib.JanA.LSCSM
    import contrib
    
    (sizex,sizey,training_inputs,training_set,validation_inputs,validation_set,ff,db_node) = contrib.JanA.dataimport.sortOutLoading(contrib.dd.DB(None))
    raw_validation_set = db_node.data["raw_validation_set"]
    

    num_lgn=16
    
    pred_act=[]
    pred_val_act=[]
    
    idxs = []
    to_delete = []
    rfs = []
    Ks = []
    
    lscsm_new = contrib.JanA.LSCSM.LSCSM1(numpy.mat(training_inputs),numpy.mat(training_set)[:,0],num_lgn,1)
    
    
    for a in os.listdir(dirr):
        num = int(reg.search(a).group(1))
        print num
        idxs.append(num)
        if os.path.exists(dirr+a+'/res.dat'):
            res = contrib.dd.loadResults(dirr+a+'/res.dat')
            dataset_node = res.children[0].children[0]

            print res.children[0].children[0].children[0].children[0].data.keys()
            K = res.children[0].children[0].children[0].children[0].data["Kernels"]
            lscsm = res.children[0].children[0].children[0].children[0].data["LSCSM"][0]

            Ks.append(K[0,:])
            #rfs.append(lscsm_new.returnRFs(K[0,:]))

            #rfs=lscsm.returnRFs(numpy.array(K)[0])

            #print numpy.shape(numpy.array(rfs))
            #print sizex,sizey

            #showRFS(numpy.reshape(numpy.array(rfs),(-1,sizex,sizey)))
            #print 'A'
            #print numpy.shape(numpy.array(K)[0])

            #print numpy.array(K)[0]
            #lscsm.X.value = numpy.mat(training_inputs)
            #lscsm.Y.value = numpy.mat(training_set[:,int(num)]).T
            #lscsm1.X.value = numpy.mat(training_inputs)
            #lscsm1.Y.value = numpy.mat(training_set[:,int(num)]).T
            #fun = lscsm1.func()
            #print lscsm.func()(numpy.array(K)[0])
            #print fun(numpy.array(K)[0])

            pred_act.append(lscsm.response(training_inputs,numpy.array(K)[0]))
            pred_val_act.append(lscsm.response(validation_inputs,numpy.array(K)[0]))
        else:
            pred_act.append(numpy.zeros((1800,1)))
            pred_val_act.append(numpy.zeros((50,1)))
            rfs.append(numpy.zeros((1,sizex*sizey)))
            to_delete.append(num)
    
    print 'Number of missed neurons:', len(to_delete)
    from contrib.JanA.regression import laplaceBias
    
    pred_act = numpy.hstack(pred_act)	
    pred_val_act = numpy.hstack(pred_val_act)
    
    #print numpy.shape(rfs)
    #rfs = numpy.vstack(rfs)
    
    pred_act_new = pred_act[:,numpy.argsort(idxs)]
    pred_val_act_new = pred_val_act[:,numpy.argsort(idxs)]
    #rfs = rfs[numpy.argsort(idxs),:]
    
    print 'Y'
    print numpy.shape(pred_act_new)
    print numpy.shape(training_set)
    
    training_set = numpy.delete(training_set, to_delete, axis = 1)
    validation_set = numpy.delete(validation_set, to_delete, axis = 1)
    pred_act_new = numpy.delete(pred_act_new, to_delete, axis = 1)
    pred_val_act_new = numpy.delete(pred_val_act_new, to_delete, axis = 1)
    #rfs = numpy.delete(rfs, to_delete, axis = 0)
    for i in xrange(0,10):
        raw_validation_set[i] = numpy.delete(raw_validation_set[i], to_delete, axis = 1)

    print 'X'
    print numpy.shape(pred_act_new)
    print numpy.shape(training_set)
    

    laplace = laplaceBias(sizex,sizey)
    LSCSM_rpi = numpy.linalg.pinv(numpy.mat(training_inputs).T*numpy.mat(training_inputs) + __main__.__dict__.get('RPILaplaceBias',0.0001)*laplace) * numpy.mat(training_inputs).T * numpy.mat(pred_act_new) 
    
    print 'A'
    print numpy.shape(LSCSM_rpi.T)
    print numpy.shape(numpy.array(LSCSM_rpi.T))
    print numpy.shape(numpy.reshape(numpy.array(LSCSM_rpi.T),(-1,sizex,sizey)))
    
    
    showRFS(numpy.reshape(numpy.array(LSCSM_rpi.T),(-1,sizex,sizey)))

    #print numpy.shape(rfs)
    #rfs = numpy.vstack(rfs)
    Ks = numpy.vstack(Ks)
    #print numpy.shape(rfs)
    
    #showRFS(numpy.reshape(rfs,(-1,sizex,sizey)))
    
    #a = Ks[:,7*num_lgn:7*num_lgn+1*num_lgn]
    #t = Ks[:,6*num_lgn:6*num_lgn+1*num_lgn]
    
    #print 'T'
    #print t
    
    #m = numpy.mat(numpy.tile(numpy.mean(abs(a),axis=1),(num_lgn,1))).T
    
    
    #print 'A'
    #print a
    
    #a = (abs(a) >= 0.3*m)*1.0
    #a = (t < 0.001)*1.0
    #Ks[:,0:num_lgn] = numpy.multiply(Ks[:,0:num_lgn],a)
    #Ks[:,num_lgn:2*num_lgn] = numpy.multiply(Ks[:,num_lgn:2*num_lgn],a)
    
    pylab.figure()
    pylab.plot(Ks[:,0:num_lgn].flatten(),Ks[:,num_lgn:2*num_lgn].flatten(),'bo')
    
    
    from contrib.JanA.sparsness_analysis import TrevesRollsSparsness
    
    pylab.figure()
    pylab.subplot(221)
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set)).flatten())
    pylab.axis(xmin=0.0,xmax=1.0)
    pylab.title('Lifetime sparseness')
    pylab.subplot(222)
    pylab.hist(TrevesRollsSparsness(numpy.mat(validation_set.T)).flatten())
    pylab.axis(xmin=0.0,xmax=1.0)
    pylab.title('Population sparseness')
    pylab.subplot(223) 
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act)).flatten())
    pylab.axis(xmin=0.0,xmax=1.0) 
    pylab.subplot(224)
    pylab.hist(TrevesRollsSparsness(numpy.mat(pred_val_act.T)).flatten())
    pylab.axis(xmin=0.0,xmax=1.0) 
    
    print 'Lifetime sparseness measured:',numpy.mean(TrevesRollsSparsness(numpy.mat(validation_set)).flatten())
    print 'Lifetime sparseness predicted:',numpy.mean(TrevesRollsSparsness(numpy.mat(pred_val_act)).flatten())
    print 'Population sparseness measured:',numpy.mean(TrevesRollsSparsness(numpy.mat(validation_set.T)).flatten())
    print 'Population sparseness predicted:',numpy.mean(TrevesRollsSparsness(numpy.mat(pred_val_act.T)).flatten())
    

    #pred_act_new = numpy.mat(training_inputs) * numpy.mat(rfs).T 
    #pred_val_act_new = numpy.mat(validation_inputs) * numpy.mat(rfs).T
    
    print 'b'
    print numpy.shape(pred_act_new)
    print numpy.shape(training_set)
    
    return (pred_act_new,pred_val_act_new,training_set,validation_set,training_inputs,validation_inputs,numpy.array(raw_validation_set))
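TrevesRollsSparsness, used throughout these examples, computes the Treves-Rolls measure: for a response vector r of length n, a = (sum(r)/n)**2 / (sum(r**2)/n), usually reported as the sparseness S = (1 - a)/(1 - 1/n), which approaches 1 for a neuron driven by very few stimuli. A standalone sketch of that formula (the repository's own implementation may differ in normalization):

import numpy

def treves_rolls(r):
    # r: 1-D array of non-negative responses
    n = float(len(r))
    a = (numpy.sum(r) / n) ** 2 / (numpy.sum(numpy.power(r, 2)) / n)
    return (1 - a) / (1 - 1 / n)

dense = numpy.ones(100)       # every stimulus drives the neuron equally
sparse = numpy.zeros(100)
sparse[0] = 1.0               # a single stimulus drives it
print treves_rolls(dense), treves_rolls(sparse)   # ~0.0 vs ~1.0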
Example #18
num_pres, num_neurons = numpy.shape(training_set)
num_pres, kernel_size = numpy.shape(training_inputs)
size = numpy.sqrt(kernel_size)

raw_validation_data_set = numpy.rollaxis(numpy.array(raw_validation_set), 2)

(num_pres, num_neurons) = numpy.shape(training_set)

testing_set = training_set[-int(0.1 * num_pres):, :]
training_set = training_set[:-int(0.1 * num_pres), :]

testing_inputs = training_inputs[-int(0.1 * num_pres):, :]
training_inputs = training_inputs[:-int(0.1 * num_pres), :]

kernel_size = numpy.shape(numpy.mat(training_inputs))[1]
laplace = laplaceBias(int(numpy.sqrt(kernel_size)), int(numpy.sqrt(kernel_size)))
a = 0.0000000001

ITER = 40

rpis = []
for i in xrange(0, ITER):
    print a
    a = a * 2
    rpis.append(
        numpy.linalg.pinv(
            numpy.mat(training_inputs).T * numpy.mat(training_inputs) +
            a * laplace) * numpy.mat(training_inputs).T *
        numpy.mat(training_set))

# these will hold the best lambda and RFs for each neuron
best_rpis = numpy.mat(numpy.zeros((kernel_size, num_neurons)))
best_lambda = numpy.mat(numpy.zeros((1, num_neurons)))