Example #1
def analyse():
    from noiseEstimation import signal_power_test
    (pred_act, pred_val_act, training_set, validation_set, training_inputs,
     validation_inputs, raw_validation_set) = fetch_data()
    size = int(numpy.sqrt(numpy.shape(training_inputs)[1]))
    
    # Sanity-check the shapes of the measured and predicted activity matrices.
    print numpy.shape(training_set)
    print numpy.shape(validation_set)
    print numpy.shape(pred_act)
    print numpy.shape(pred_val_act)
    
    printCorrelationAnalysis(training_set,validation_set,pred_act,pred_val_act)
    
    # Roll the last (neuron) axis to the front: one (trials x presentations)
    # block per neuron, as expected by signal_power_test.
    raw_validation_data_set = numpy.rollaxis(numpy.array(raw_validation_set), 2)
       
    (signal_power, noise_power, normalized_noise_power, training_prediction_power,
     validation_prediction_power, signal_power_variance) = signal_power_test(
         raw_validation_data_set, numpy.array(training_set),
         numpy.array(validation_set), pred_act, pred_val_act)
    
    # Exclude neurons whose normalized noise power exceeds 70 (i.e. responses
    # dominated by noise) before averaging the validation prediction power.
    to_delete = numpy.array(numpy.nonzero((numpy.array(normalized_noise_power) > 70) * 1.0))[0]
    print 'Validation prediction power:', numpy.mean(numpy.delete(validation_prediction_power, to_delete))
    

    
    #compareModelPerformanceWithRPI(training_set,validation_set,training_inputs,validation_inputs,numpy.mat(pred_act),numpy.mat(pred_val_act),raw_validation_set,size,size,modelname='LSCSM')	
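
signal_power_test (imported from noiseEstimation) is not shown on this page. As orientation, here is a minimal per-neuron sketch of the repeated-trials signal/noise power decomposition such a function typically computes, in the spirit of the Sahani-Linden estimator. The exact definitions in noiseEstimation, including the scaling of normalized_noise_power that the >70 threshold above is compared against, are assumptions here.

import numpy

def signal_noise_power_sketch(resp):
    # resp: (num_trials, num_presentations) repeated responses of one neuron.
    n = resp.shape[0]
    power_of_mean = resp.mean(axis=0).var()   # power of the trial-averaged response
    mean_of_power = resp.var(axis=1).mean()   # average single-trial response power
    # Unbiased estimate of the stimulus-locked (signal) power; the remainder
    # of the single-trial power is attributed to noise.
    signal_power = (n * power_of_mean - mean_of_power) / (n - 1)
    noise_power = mean_of_power - signal_power
    # One plausible normalization, as a percentage of total response power.
    normalized_noise_power = 100.0 * noise_power / (signal_power + noise_power)
    return signal_power, noise_power, normalized_noise_power
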
Example #2
def runLSCSMAnalysis(rpi_pred_act, rpi_pred_val_act, glm_pred_act, glm_pred_val_act,
                     training_set, validation_set, num_neurons, raw_validation_data_set):
    
    # Fit a piecewise output nonlinearity ('TF') on each model's training
    # predictions and apply it to the training and validation predictions.
    ofs = run_nonlinearity_detection(numpy.mat(training_set), numpy.mat(rpi_pred_act), display=True)
    rpi_pred_act_t = apply_output_function(numpy.mat(rpi_pred_act), ofs)
    rpi_pred_val_act_t = apply_output_function(numpy.mat(rpi_pred_val_act), ofs)

    ofs = run_nonlinearity_detection(numpy.mat(training_set), numpy.mat(glm_pred_act), display=True)
    glm_pred_act_t = apply_output_function(numpy.mat(glm_pred_act), ofs)
    glm_pred_val_act_t = apply_output_function(numpy.mat(glm_pred_val_act), ofs)
    
    
    pylab.figure()
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(rpi_pred_val_act[:, i], validation_set[:, i], 'o')
    pylab.savefig(normalize_path('RPI_val_relationship.png'))

    pylab.figure()
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(glm_pred_val_act[:, i], validation_set[:, i], 'o')
    pylab.savefig(normalize_path('GLM_val_relationship.png'))
    
    
    pylab.figure()
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(rpi_pred_val_act_t[:, i], validation_set[:, i], 'o')
    pylab.savefig(normalize_path('RPI_t_val_relationship.png'))

    pylab.figure()
    for i in xrange(0, num_neurons):
        pylab.subplot(11, 11, i + 1)
        pylab.plot(glm_pred_val_act_t[:, i], validation_set[:, i], 'o')
        pylab.title('GLM')
    pylab.savefig(normalize_path('GLM_t_val_relationship.png'))
    
    
    print numpy.shape(validation_set)
    print numpy.shape(rpi_pred_val_act_t)
    print numpy.shape(glm_pred_val_act)
    
    pylab.figure()
    # Per-neuron MSE of the TF-transformed RPI against the raw GLM predictions.
    pylab.plot(numpy.mean(numpy.power(validation_set - rpi_pred_val_act_t, 2), 0),
               numpy.mean(numpy.power(validation_set - glm_pred_val_act, 2), 0), 'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI')
    pylab.ylabel('GLM')
    pylab.savefig(normalize_path('GLM_vs_RPI_MSE.png'))
    
    
    print 'RPI \n'
	
    (ranks,correct,pred) = performIdentification(validation_set,rpi_pred_val_act)
    print "Natural:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - rpi_pred_val_act,2))
	
    (ranks,correct,pred) = performIdentification(validation_set,rpi_pred_val_act_t)
    print "Natural+TF:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - rpi_pred_val_act_t,2))
		
    (signal_power, noise_power, normalized_noise_power, training_prediction_power,
     validation_prediction_power, signal_power_variance) = signal_power_test(
         raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set),
         numpy.array(rpi_pred_act), numpy.array(rpi_pred_val_act))
    (signal_power, noise_power, normalized_noise_power, training_prediction_power_t,
     rpi_validation_prediction_power_t, signal_power_variance) = signal_power_test(
         raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set),
         numpy.array(rpi_pred_act_t), numpy.array(rpi_pred_val_act_t))
	
    print "Prediction power on training set / validation set: ", numpy.mean(training_prediction_power) , " / " , numpy.mean(validation_prediction_power)
    print "Prediction power after TF on training set / validation set: ", numpy.mean(training_prediction_power_t) , " / " , numpy.mean(rpi_validation_prediction_power_t)

	
    print '\n \n GLM \n'
	
    (ranks,correct,pred) = performIdentification(validation_set,glm_pred_val_act)
    print "Natural:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - glm_pred_val_act,2))
	
    (ranks,correct,pred) = performIdentification(validation_set,glm_pred_val_act_t)
    print "Natural+TF:", correct , "Mean rank:", numpy.mean(ranks) , "MSE", numpy.mean(numpy.power(validation_set - glm_pred_val_act_t,2))
		
    (glm_signal_power, glm_noise_power, glm_normalized_noise_power,
     glm_training_prediction_power, glm_validation_prediction_power,
     glm_signal_power_variance) = signal_power_test(
         raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set),
         numpy.array(glm_pred_act), numpy.array(glm_pred_val_act))
    (glm_signal_power_t, glm_noise_power_t, glm_normalized_noise_power_t,
     glm_training_prediction_power_t, glm_validation_prediction_power_t,
     glm_signal_power_variances_t) = signal_power_test(
         raw_validation_data_set, numpy.array(training_set), numpy.array(validation_set),
         numpy.array(glm_pred_act_t), numpy.array(glm_pred_val_act_t))
	
    print "Prediction power on training set / validation set: ", numpy.mean(glm_training_prediction_power) , " / " , numpy.mean(glm_validation_prediction_power)
    print "Prediction power after TF on training set / validation set: ", numpy.mean(glm_training_prediction_power_t) , " / " , numpy.mean(glm_validation_prediction_power_t)
    
    pylab.figure()
    pylab.plot(rpi_validation_prediction_power_t[:num_neurons],glm_validation_prediction_power[:num_neurons],'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI_t')
    pylab.ylabel('GLM')
    pylab.savefig(normalize_path('GLM_vs_RPI_prediction_power.png'))

    
    pylab.figure()
    pylab.plot(rpi_validation_prediction_power_t[:num_neurons],glm_validation_prediction_power_t[:num_neurons],'o')
    pylab.hold(True)
    pylab.plot([0.0,1.0],[0.0,1.0])
    pylab.xlabel('RPI_t')
    pylab.ylabel('GLM_t')
    pylab.savefig(normalize_path('GLMt_vs_RPIt_prediction_power.png'))
    
    print 'WithoutTF'
    printCorrelationAnalysis(training_set,validation_set,glm_pred_act,glm_pred_val_act)
    print 'WithTF'
    printCorrelationAnalysis(training_set,validation_set,glm_pred_act_t,glm_pred_val_act_t)
                

    #db_node.add_data("Kernels",K,force=True)
    #db_node.add_data("GLM",glm,force=True)
    #db_node.add_data("ReversCorrelationPredictedActivities",glm_pred_act,force=True)
    #db_node.add_data("ReversCorrelationPredictedActivities+TF",glm_pred_act_t,force=True)
    #db_node.add_data("ReversCorrelationPredictedValidationActivities",glm_pred_val_act,force=True)
    #db_node.add_data("ReversCorrelationPredictedValidationActivities+TF",glm_pred_val_act_t,force=True)
    #return [K,validation_inputs, validation_set]
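
performIdentification is not defined in these excerpts. Below is a minimal sketch of the stimulus-identification analysis it appears to perform, returning (ranks, correct, pred) as unpacked above; matching by nearest mean squared error is an assumption, not necessarily the original metric.

import numpy

def perform_identification_sketch(validation_set, pred_val_act):
    # For each measured population response, rank all predicted responses by
    # their MSE distance to it; the true stimulus gets rank 0 when the
    # closest prediction is the correct one.
    measured = numpy.asarray(validation_set)
    predicted = numpy.asarray(pred_val_act)
    num_pres = measured.shape[0]
    ranks = numpy.zeros(num_pres, dtype=int)
    pred = numpy.zeros(num_pres, dtype=int)
    for i in range(num_pres):
        errors = numpy.mean((predicted - measured[i, :]) ** 2, axis=1)
        order = numpy.argsort(errors)
        pred[i] = order[0]                          # best-matching stimulus
        ranks[i] = numpy.nonzero(order == i)[0][0]  # rank of the true stimulus
    correct = int(numpy.sum(ranks == 0))
    return ranks, correct, pred
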
	
	
Example #3
def runLSCTM():
    import noiseEstimation
    res = contrib.dd.DB(None)
    (sizex, sizey, training_inputs, training_set, validation_inputs,
     validation_set, ff, db_node) = contrib.JanA.dataimport.sortOutLoading(res)
    raw_validation_set = db_node.data["raw_validation_set"]

    training_inputs1 = training_inputs[0:-4, :]
    validation_inputs1 = validation_inputs[0:-4, :]
    training_inputs2 = training_inputs[1:-3, :]
    validation_inputs2 = validation_inputs[1:-3, :]
    training_inputs3 = training_inputs[2:-2, :]
    validation_inputs3 = validation_inputs[2:-2, :]

    training_set = training_set[4:, :]
    validation_set = validation_set[4:, :]

    for i in xrange(0, len(raw_validation_set)):
        raw_validation_set[i] = raw_validation_set[i][4:, :]
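
    # Illustrative note (not in the original code): after the shifts above,
    # row i of training_inputs1/2/3 holds stimulus frames i, i+1 and i+2,
    # i.e. the frames at lags t-4, t-3 and t-2 relative to the response in
    # row i of training_set, which corresponds to original frame i+4.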

    num_pres, num_neurons = numpy.shape(training_set)
    num_pres, kernel_size = numpy.shape(training_inputs)
    size = int(numpy.sqrt(kernel_size))

    raw_validation_data_set = numpy.rollaxis(numpy.array(raw_validation_set),
                                             2)

    params = {}
    params["LSCTM"] = True
    db_node = db_node.get_child(params)

    params = {}
    params["LaplacaBias"] = __main__.__dict__.get('LaplaceBias', 0.0004)
    params["LGN_NUM"] = __main__.__dict__.get('LgnNum', 6)
    params["num_neurons"] = __main__.__dict__.get('NumNeurons', 103)
    params["V1OF"] = __main__.__dict__.get('V1OF', 'Exp')
    params["LGNOF"] = __main__.__dict__.get('LGNOF', 'Exp')
    params["LGNTreshold"] = __main__.__dict__.get('LGNTreshold', False)
    params["HiddenLayerSize"] = __main__.__dict__.get('HiddenLayerSize', 1.0)
    params["LogLossCoef"] = __main__.__dict__.get('LogLossCoef', 1.0)
    params["NegativeLgn"] = __main__.__dict__.get('NegativeLgn', True)
    params["MaxW"] = __main__.__dict__.get('MaxW', 5000)
    params["GenerationSize"] = __main__.__dict__.get('GenerationSize', 100)
    params["PopulationSize"] = __main__.__dict__.get('PopulationSize', 100)
    params["MutationRate"] = __main__.__dict__.get('MutationRate', 0.05)
    params["CrossoverRate"] = __main__.__dict__.get('CrossoverRate', 0.9)

    db_node1 = db_node
    db_node = db_node.get_child(params)

    num_neurons = params["num_neurons"]

    raw_validation_data_set = numpy.rollaxis(numpy.array(raw_validation_set),
                                             2)

    [K, lsctm, rfs] = fitLSCTMEvo(numpy.mat(training_inputs1),
                                  numpy.mat(training_inputs2),
                                  numpy.mat(training_inputs3),
                                  numpy.mat(training_set), params["LGN_NUM"],
                                  num_neurons)

    (rfs1, rfs2, rfs3) = rfs

    pylab.figure()
    m = numpy.max(numpy.abs(rfs1))
    for i in xrange(
            0,
            int(num_neurons * __main__.__dict__.get('HiddenLayerSize', 1.0))):
        pylab.subplot(11, 11, i + 1)
        pylab.imshow(numpy.reshape(rfs1[i, 0:kernel_size], (size, size)),
                     vmin=-m,
                     vmax=m,
                     cmap=pylab.cm.RdBu,
                     interpolation='nearest')
    pylab.savefig('GLM_rfs1.png')

    pylab.figure()
    m = numpy.max(numpy.abs(rfs2))
    for i in xrange(
            0,
            int(num_neurons * __main__.__dict__.get('HiddenLayerSize', 1.0))):
        pylab.subplot(11, 11, i + 1)
        pylab.imshow(numpy.reshape(rfs2[i, 0:kernel_size], (size, size)),
                     vmin=-m,
                     vmax=m,
                     cmap=pylab.cm.RdBu,
                     interpolation='nearest')
    pylab.savefig('GLM_rfs2.png')

    pylab.figure()
    m = numpy.max(numpy.abs(rfs3))
    for i in xrange(
            0,
            int(num_neurons * __main__.__dict__.get('HiddenLayerSize', 1.0))):
        pylab.subplot(11, 11, i + 1)
        pylab.imshow(numpy.reshape(rfs3[i, 0:kernel_size], (size, size)),
                     vmin=-m,
                     vmax=m,
                     cmap=pylab.cm.RdBu,
                     interpolation='nearest')
    pylab.savefig('GLM_rfs3.png')

    lsctm_pred_act = lsctm.response(training_inputs1, training_inputs2,
                                    training_inputs3, K)
    lsctm_pred_val_act = lsctm.response(validation_inputs1, validation_inputs2,
                                        validation_inputs3, K)

    ofs = run_nonlinearity_detection(numpy.mat(training_set),
                                     numpy.mat(lsctm_pred_act),
                                     num_bins=10,
                                     display=False,
                                     name='RPI_piece_wise_nonlinearity.png')
    lsctm_pred_act_t = numpy.mat(
        apply_output_function(numpy.mat(lsctm_pred_act), ofs))
    lsctm_pred_val_act_t = numpy.mat(
        apply_output_function(numpy.mat(lsctm_pred_val_act), ofs))

    if len(raw_validation_set) != 1:
        print 'Without TF'
        (signal_power, noise_power, normalized_noise_power,
         training_prediction_power, rpi_validation_prediction_power,
         signal_power_variance) = performance_analysis(training_set,
                                                       validation_set,
                                                       lsctm_pred_act,
                                                       lsctm_pred_val_act,
                                                       raw_validation_set, 85)
        print 'With TF'
        (signal_power_t, noise_power_t, normalized_noise_power_t,
         training_prediction_power_t, rpi_validation_prediction_power_t,
         signal_power_variance_t) = performance_analysis(
             training_set, validation_set, lsctm_pred_act_t,
             lsctm_pred_val_act_t, raw_validation_set, 85)

        significant = numpy.array(
            numpy.nonzero((numpy.array(normalized_noise_power) < 85) * 1.0))[0]

        # perf and perf_t are accumulator lists defined elsewhere in the
        # source script; they collect the mean prediction power over the
        # reliable (low-noise) neurons, with and without the output TF.
        perf.append(numpy.mean(rpi_validation_prediction_power[significant]))
        perf_t.append(
            numpy.mean(rpi_validation_prediction_power_t[significant]))

    (train_c, val_c) = printCorrelationAnalysis(training_set, validation_set,
                                                lsctm_pred_act,
                                                lsctm_pred_val_act)
    (t_train_c, t_val_c) = printCorrelationAnalysis(training_set,
                                                    validation_set,
                                                    lsctm_pred_act_t,
                                                    lsctm_pred_val_act_t)
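
run_nonlinearity_detection and apply_output_function are not shown in these excerpts. Below is a minimal sketch, under stated assumptions, of the kind of binned piecewise-linear output nonlinearity they appear to implement (cf. num_bins=10 in the call above); the equal-count binning and linear interpolation are guesses at the approach, not the original implementation.

import numpy

def fit_output_nonlinearity_sketch(measured, predicted, num_bins=10):
    # For each neuron: sort its predicted activities, split them into
    # num_bins equal-count bins, and record (mean prediction, mean measured
    # response) per bin as a piecewise-linear output function.
    measured = numpy.asarray(measured)
    predicted = numpy.asarray(predicted)
    ofs = []
    for i in range(predicted.shape[1]):
        p = predicted[:, i]
        bins = numpy.array_split(numpy.argsort(p), num_bins)
        centers = numpy.array([p[b].mean() for b in bins])
        means = numpy.array([measured[b, i].mean() for b in bins])
        ofs.append((centers, means))
    return ofs

def apply_output_nonlinearity_sketch(predicted, ofs):
    # Map each neuron's predictions through its piecewise-linear function.
    predicted = numpy.asarray(predicted)
    out = numpy.zeros(predicted.shape)
    for i, (centers, means) in enumerate(ofs):
        out[:, i] = numpy.interp(predicted[:, i], centers, means)
    return out

With ofs in this form, apply_output_nonlinearity_sketch(lsctm_pred_act, ofs) would play the role of apply_output_function above.
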
Example #4

a = [
    1e-10, 2e-10, 1e-10, 8e-10, 1.6e-09, 3.2e-09, 6.4e-09, 1.28e-08,
    2.56e-08, 5.12e-08, 1.024e-07, 2.048e-07, 4.096e-07, 8.192e-07,
    1.6384e-06, 3.2768e-06, 6.5536e-06, 1.31072e-05, 2.62144e-05, 5.24288e-05,
    0.0001048576, 0.0002097152, 0.0004194304, 0.0008388608, 0.0016777216,
    0.0033554432, 0.0067108864, 0.0134217728, 0.0268435456, 0.0536870912,
    0.1073741824, 0.2147483648, 0.4294967296, 0.8589934592, 1.7179869184,
    3.4359738368, 6.8719476736, 13.7438953472, 27.4877906944, 54.9755813888,
]


# For each neuron, pick the regularization strength (index j into a) whose
# reverse-correlation kernel best predicts the held-out testing set.
for i in xrange(0, num_neurons):
    corrs = []
    for j in xrange(0, 40):
        # pa (training-set predictions) is computed but unused in this loop.
        pa = numpy.mat(training_inputs * rpis[j][:, i])
        pta = numpy.mat(testing_inputs * rpis[j][:, i])
        corrs.append(scipy.stats.pearsonr(numpy.array(pta).flatten(),
                                          numpy.array(testing_set)[:, i].flatten())[0])

    if i < 10:
        pylab.plot(corrs)
    print numpy.shape(best_rpis[:, i])
    print numpy.shape(rpis[numpy.argmax(corrs)][:, i])
    best_rpis[:, i] = rpis[numpy.argmax(corrs)][:, i]
    best_lambda[0, i] = a[numpy.argmax(corrs)]


print best_lambda

rpa = numpy.mat(training_inputs * best_rpis)
rpva = numpy.mat(validation_inputs * best_rpis)

ofs = run_nonlinearity_detection(numpy.mat(training_set), numpy.mat(rpa), display=False)
rpi_pred_act_t = apply_output_function(numpy.mat(rpa), ofs)
rpi_pred_val_act_t = apply_output_function(numpy.mat(rpva), ofs)
printCorrelationAnalysis(training_set, validation_set, rpi_pred_act_t, rpi_pred_val_act_t)

numpy.savetxt(animal_names[animal_num] + "STA_20091011.txt", best_rpis)
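
The rpis list indexed by j above is built earlier in the script and is not shown on this page. Here is a minimal sketch of how such a bank of regularized reverse-correlation kernels ('RPI', regularized pseudoinverse), one per value in a, could be computed; plain ridge regularization is assumed, whereas the original code may use a Laplacian smoothness penalty (cf. the LaplaceBias parameter in the other examples).

import numpy

def compute_rpi_bank_sketch(training_inputs, training_set, lambdas):
    # One (kernel_size x num_neurons) kernel matrix per regularization value.
    X = numpy.asarray(training_inputs)
    Y = numpy.asarray(training_set)
    XTX = numpy.dot(X.T, X)
    XTY = numpy.dot(X.T, Y)
    eye = numpy.eye(XTX.shape[0])
    return [numpy.linalg.solve(XTX + lam * eye, XTY) for lam in lambdas]

# Hypothetical usage matching the sweep above:
# rpis = compute_rpi_bank_sketch(training_inputs, training_set, a)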