Code Example #1
File: ann.py Project: aplassard/Image_Processing
def start():
    featuresList, labelsList = loadFile("output.txt")

    print 'Normalizing array...'

    normalizearray(featuresList)

    alldata = ClassificationDataSet(len(featuresList[0]), 1, nb_classes=8, class_labels=['ffi_brainmatter', 'ffi_neuron', 'ffi_vacuole', 'ffi_astrocyte', 'wt_brainmatter', 'wt_neuron', 'wt_vacuole', 'wt_astrocyte'])
    for i in range(len(labelsList)):
        alldata.appendLinked(featuresList[i], labelsList[i])

    #print 'All data: ', alldata
    #print 'Statistics: ', alldata.calculateStatistics()

    treeFeatures = fSel.getTreeFeatures(featuresList, labelsList)
    newK = treeFeatures.shape[1]
    print "K= ", newK
    reducedFeatures = fSel.getBestK(featuresList, labelsList, 'f_classif', newK)
    reducedData = ClassificationDataSet(len(reducedFeatures[0]), 1, nb_classes=8, class_labels=['ffi_brainmatter', 'ffi_neuron', 'ffi_vacuole', 'ffi_astrocyte', 'wt_brainmatter', 'wt_neuron', 'wt_vacuole', 'wt_astrocyte'])
    
    #prep reducedData object with reduced feature list
    for i in range(len(labelsList)):
        reducedData.appendLinked(reducedFeatures[i], labelsList[i])
    
    
    print 'Splitting test and training data...'
    tstdata, trndata = alldata.splitWithProportion(0.30)
    reducedTestData, reducedTrainData = reducedData.splitWithProportion(0.30)
    
    print 'Number of training and test patterns: ', len(trndata), len(tstdata)
    
    
    trndata._convertToOneOfMany(bounds=[0,1])
    tstdata._convertToOneOfMany(bounds=[0,1])  
    
    reducedTestData._convertToOneOfMany(bounds=[0,1])
    reducedTrainData._convertToOneOfMany(bounds=[0,1])
    
    
    #print "Number of training patterns: ", len(trndata)
    print "Input and output dimensions: ", trndata.indim, trndata.outdim
    #print "Sample (input, target, class):"
    #print trndata['input'][0], trndata['target'][0], trndata['class'][0]
    #print trndata['input'][1], trndata['target'][1], trndata['class'][1]

    
    buildFNN(tstdata, trndata)
    
    print "___________________________________________FEATURE REDUCTION________________________________________________"
    buildFNN(reducedTestData, reducedTrainData)
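
buildFNN is not shown in this listing; a minimal sketch of what such a helper might look like, assuming a single hidden layer, PyBrain's standard backprop loop, and that buildNetwork, SoftmaxLayer, BackpropTrainer, and percentError are imported as in the other examples (the hidden size and epoch count here are assumptions):

def buildFNN(tstdata, trndata):
    # assumed helper: one hidden layer, softmax output for classification
    fnn = buildNetwork(trndata.indim, 20, trndata.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1,
                              verbose=True, weightdecay=0.01)
    for _ in range(10):
        trainer.trainEpochs(1)
        trnresult = percentError(trainer.testOnClassData(), trndata['class'])
        tstresult = percentError(trainer.testOnClassData(dataset=tstdata),
                                 tstdata['class'])
        print "train error: %5.2f%%  test error: %5.2f%%" % (trnresult, tstresult)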
Code Example #2
def initialize(trainingfeatures,traininglabels,p=0.7):
    alldata = ClassificationDataSet(trainingfeatures.shape[1], 1, nb_classes=len(set(traininglabels)))
    for i in xrange(len(traininglabels)):
        alldata.appendLinked(trainingfeatures[i], traininglabels[i])
    trndata, tstdata = alldata.splitWithProportion( p )
    trndata._convertToOneOfMany(bounds=[0, 1])
    tstdata._convertToOneOfMany(bounds=[0, 1])
    model, accuracy, params = buildANN(trndata, tstdata)
    print '\nThe best model had '+str(accuracy)+'% accuracy and used the parameters:\n'+params+'\n'
    return model
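
buildANN is likewise external to this snippet; a hedged sketch under the assumption that it tries a few hidden-layer sizes and returns the best network, its test accuracy, and a string describing the winning parameters (the candidate sizes and epoch count are assumptions):

def buildANN(trndata, tstdata):
    # assumed helper: small grid search over hidden-layer sizes
    best_model, best_accuracy, best_params = None, 0.0, ''
    for hidden in (5, 10, 20):
        net = buildNetwork(trndata.indim, hidden, trndata.outdim, outclass=SoftmaxLayer)
        trainer = BackpropTrainer(net, dataset=trndata, momentum=0.1, weightdecay=0.01)
        trainer.trainEpochs(20)
        accuracy = 100.0 - percentError(trainer.testOnClassData(dataset=tstdata),
                                        tstdata['class'])
        if accuracy > best_accuracy:
            best_model, best_accuracy, best_params = net, accuracy, 'hidden=%d' % hidden
    return best_model, best_accuracy, best_params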
Code Example #3
def pybrainData(split, data=None):
	# taken from iris data set at machine learning repository
	if not data:
		pat = cat1 + cat2 + cat3
	else:
		pat = data
	alldata = ClassificationDataSet(4, 1, nb_classes=3,
		class_labels=['set', 'vers', 'virg'])
	for p in pat:
		t = p[2]
		alldata.addSample(p[0], t)
	tstdata, trndata = alldata.splitWithProportion(split)
	trndata._convertToOneOfMany()
	tstdata._convertToOneOfMany()
	return trndata, tstdata
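
cat1, cat2, and cat3 are defined elsewhere in the source; judging from the indexing (p[0] is the feature vector, p[2] the class label), each is a list of patterns for one iris class. A hedged reconstruction using scikit-learn's copy of the dataset:

from sklearn import datasets

iris = datasets.load_iris()
# hypothetical reconstruction: one pattern list per class,
# each entry shaped (features, unused, [class_index])
cat1, cat2, cat3 = ([(x.tolist(), None, [int(c)])
                     for x, c in zip(iris.data, iris.target) if c == k]
                    for k in (0, 1, 2))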
Code Example #4
def pybrainData(split, data=None):
    # taken from iris data set at machine learning repository
    if not data:
        pat = cat1 + cat2 + cat3
    else:
        pat = data
    alldata = ClassificationDataSet(4,
                                    1,
                                    nb_classes=3,
                                    class_labels=['set', 'vers', 'virg'])
    for p in pat:
        t = p[2]
        alldata.addSample(p[0], t)
    tstdata, trndata = alldata.splitWithProportion(split)
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    return trndata, tstdata
Code Example #5

fobj = open('02 select_data_num.csv', 'wb')
# write the header row, comma-separated
for item in header:
    fobj.write(item)
    fobj.write(',')
fobj.write('\n')
# write each data row, replacing embedded commas in the values
for item in numdata:
    for it in item:
        fobj.write(str(it).replace(',', ' '))
        fobj.write(',')
    fobj.write('\n')
fobj.close()

npdata = np.array(numdata, dtype=np.float)
npdata[:, 2:] = preprocessing.scale(npdata[:, 2:])
numdata = copy.deepcopy(npdata)

net = buildNetwork(14, 14, 1, bias=True, outclass=SoftmaxLayer)
# note: a softmax over a single output unit is degenerate (it always emits 1.0);
# a SigmoidLayer output, or two outputs plus _convertToOneOfMany(), is the usual choice
ds = ClassificationDataSet(14, 1, nb_classes=2)
for item in numdata:
    # column 1 holds the class label, columns 2 onward the 14 features
    ds.addSample(tuple(item[2:]), item[1])
dsTrain, dsTest = ds.splitWithProportion(0.8)

print('Training')
# note: the trainer is fit on the full dataset `ds`, not the `dsTrain` split
trainer = BackpropTrainer(net,
                          ds,
                          momentum=0.1,
                          verbose=True,
                          weightdecay=0.01)
# trainer.train()
trainer.trainUntilConvergence(maxEpochs=20)
print('Finished training')

Traininp = dsTrain['input']
Traintar = dsTrain['target']
Testinp = dsTest['input']
Testtar = dsTest['target']
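
The snippet stops after pulling the raw arrays out of the two splits; a plausible follow-up (an assumption, not in the original) is a manual accuracy check with net.activate, thresholding the single output at 0.5:

correct = 0
for inp, tar in zip(Testinp, Testtar):
    pred = 1 if net.activate(inp)[0] > 0.5 else 0  # threshold is an assumption
    correct += int(pred == tar[0])
print('Test accuracy: %.2f%%' % (100.0 * correct / len(Testtar)))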
Code Example #6
from sklearn import datasets
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

iris = datasets.load_iris()

x, y = iris.data, iris.target
print(len(x))

dataset = ClassificationDataSet(4, 1, nb_classes=3)

for i in range(len(x)):
    dataset.addSample(x[i], y[i])

train_data, part_data = dataset.splitWithProportion(0.6)

test_data, val_data = part_data.splitWithProportion(0.5)

net = buildNetwork(dataset.indim, 3, dataset.outdim)
trainer = BackpropTrainer(net,
                          dataset=train_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

train_errors, val_errors = trainer.trainUntilConvergence(dataset=train_data,
                                                         maxEpochs=100)

print(trainer.totalepochs)
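
The example ends after training; a short follow-up sketch (not in the original) reports the mean squared error on the held-out split via BackpropTrainer.testOnData:

test_error = trainer.testOnData(dataset=test_data)
print('MSE on the test split: %f' % test_error)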
Code Example #7
from sklearn import datasets
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import matplotlib.pyplot as plt

iris = datasets.load_iris()

entrada, saida = iris.data, iris.target

dataset = ClassificationDataSet(4, 1, nb_classes=3)

# Add the samples to the dataset
for i in range(len(entrada)):
    dataset.addSample(entrada[i], saida[i])

# Set aside the portion of the data used to train the network
parteTreino, parteDados = dataset.splitWithProportion(0.6)
print("Training set size: " + str(len(parteTreino)))

# Split the remaining data between testing and validating the network
teste, validacao = parteDados.splitWithProportion(0.5)
print("Test set size: " + str(len(teste)))
print("Validation set size: " + str(len(validacao)))

# Build the network
rede = buildNetwork(dataset.indim, 3, dataset.outdim)

# Run the training and collect the errors
treinamento = BackpropTrainer(rede,
                              dataset=parteTreino,
                              learningrate=0.01,
                              momentum=0.1,
                              verbose=True)

Code Example #8

import matplotlib.pyplot as plt

from sklearn import datasets
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer

iris = datasets.load_iris()
X, y = iris.data, iris.target
dataset = ClassificationDataSet(4, 1, nb_classes=3)

for sample_input, sample_output in zip(X, y):
    dataset.addSample(sample_input, sample_output)

# Partitioning data for training
training_data, partitioned_data = dataset.splitWithProportion(0.6)

# Splitting data for testing and validation
testing_data, validation_data = partitioned_data.splitWithProportion(0.5)

network = buildNetwork(dataset.indim, 2, 2, dataset.outdim)
trainer = BackpropTrainer(network,
                          dataset=training_data,
                          learningrate=0.01,
                          momentum=0.1,
                          verbose=True)

training_errors, validation_errors = trainer.trainUntilConvergence(
    dataset=training_data, maxEpochs=200)
plt.plot(training_errors, 'b', label='training error')
plt.plot(validation_errors, 'r', label='validation error')
plt.legend()
plt.show()
Code Example #9
from sklearn import datasets
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

iris = datasets.load_iris()
x, y = iris.data, iris.target
dataset = ClassificationDataSet(4, 1, nb_classes=3)

for i in range(len(x)):
    dataset.addSample(x[i], y[i])

train_data_temp, part_data_temp = dataset.splitWithProportion(0.6)
test_data_temp, val_data_temp = part_data_temp.splitWithProportion(0.5)

train_data = ClassificationDataSet(4, 1, nb_classes=3)
for n in range(train_data_temp.getLength()):
    train_data.addSample(
        train_data_temp.getSample(n)[0],
        train_data_temp.getSample(n)[1])

test_data = ClassificationDataSet(4, 1, nb_classes=3)
for n in range(test_data_temp.getLength()):
    test_data.addSample(
        test_data_temp.getSample(n)[0],
        test_data_temp.getSample(n)[1])

val_data = ClassificationDataSet(4, 1, nb_classes=3)
for n in range(val_data_temp.getLength()):
    val_data.addSample(
        val_data_temp.getSample(n)[0],
        val_data_temp.getSample(n)[1])
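
Rebuilding fresh ClassificationDataSets, as above, works around later PyBrain releases in which splitWithProportion returns plain SupervisedDataSet objects without class information. With the copies in place, the usual next step (assumed here, since the snippet is cut off) is the one-of-many encoding:

train_data._convertToOneOfMany()
test_data._convertToOneOfMany()
val_data._convertToOneOfMany()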
Code Example #10
from sklearn import datasets

iris = datasets.load_iris()
X, y = iris.data, iris.target

from pybrain.datasets.classification import ClassificationDataSet
from pybrain.utilities import percentError
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
#import numpy as np
import matplotlib.pyplot as pl

ds = ClassificationDataSet(4, 1, nb_classes=3)
for i in range(len(X)):
    ds.addSample(X[i], y[i])

# splitting data into train, test and validation data in 60/20/20 proportions
trndata, partdata = ds.splitWithProportion(0.60)
tstdata, validdata = partdata.splitWithProportion(0.50)

# encode classes with one output neuron per class
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
validdata._convertToOneOfMany()

# _convertToOneOfMany stores the original target values in the 'class' field,
# preserving them
print trndata['class']
# new values of the target after conversion
print trndata['target']

# check the dimensions of the input (4 features) and output (3 classes)
print trndata.indim, trndata.outdim, tstdata.indim, tstdata.outdim
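
The snippet ends at the dimension check; a plausible continuation (assumed, mirroring the other examples, and using only the modules already imported above) builds and trains a softmax classifier:

fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1, verbose=True)
trainer.trainEpochs(10)
print percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])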
Code Example #11
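This snippet opens midway through a hand-built PyBrain network: n, dim, in_to_hidden, and hidden_to_out are created earlier in the source. A hedged reconstruction of that missing setup (the layer types, hidden size, and input dimension are assumptions):

from pybrain.structure import FeedForwardNetwork, LinearLayer, SigmoidLayer, SoftmaxLayer, FullConnection

dim = 100                        # input dimension; the real value comes from the data
n = FeedForwardNetwork()
inLayer = LinearLayer(dim)
hiddenLayer = SigmoidLayer(10)   # hidden size is an assumption
outLayer = SoftmaxLayer(2)       # two classes, matching nb_classes=2 below
n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
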
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

n.sortModules()


print 'build set'

alldata = ClassificationDataSet(dim, 1, nb_classes=2)

(data,label,items) = BinReader.readData(ur'F:\AliRecommendHomeworkData\1212新版\train15_17.expand.samp.norm.bin') 
#(train,label,data) = BinReader.readData(r'C:\data\small\norm\train1217.bin')
for i in range(len(data)):
    alldata.addSample(data[i], label[i])

tstdata, trndata = alldata.splitWithProportion(0.25)

trainer = BackpropTrainer(n, trndata, momentum=0.1, verbose=True, weightdecay=0.01)

print 'start'
#trainer.trainEpochs(1)
trainer.trainUntilConvergence(maxEpochs=2)
trnresult = percentError(trainer.testOnClassData(),trndata['class'])

tstresult = percentError(trainer.testOnClassData(dataset=tstdata), tstdata['class'])

print "epoch: %4d" % trainer.totalepochs, \
        "  train error: %5.2f%%" % trnresult, \
        "  test error: %5.2f%%" % tstresult

print 'get result'
Code Example #12
from sklearn import datasets
from numpy import ravel
from pybrain.datasets.classification import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader
import os

# Downloading Dataset
olivetti = datasets.fetch_olivetti_faces()
oData, oTarget = olivetti.data, olivetti.target

# Initializing Dataset
dataset = ClassificationDataSet(4096, 1, nb_classes=40)

for i in range(len(oData)):
	dataset.addSample(ravel(oData[i]), oTarget[i])

# Splitting dataset for 75% training data and 25% test data
testData, trainingData = dataset.splitWithProportion(0.25)

trainingData._convertToOneOfMany()
testData._convertToOneOfMany()

# Neural Network Construction
# Load previous training if it exists
if os.path.isfile('oliv.xml'):
	print('Loading Previous Training Data...')
	fnn = NetworkReader.readFrom('oliv.xml')
	print('Training Data Loaded!\n')
# Build fresh network if training does not exist
else:
	fnn = buildNetwork(trainingData.indim, 64, trainingData.outdim, outclass=SoftmaxLayer)

# Trainer initialization
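
The listing is cut off right after this comment; a plausible continuation (assumed, and making use of the NetworkWriter imported above) trains the network and caches it to oliv.xml:

from pybrain.supervised.trainers import BackpropTrainer

trainer = BackpropTrainer(fnn, dataset=trainingData, momentum=0.1,
                          verbose=True, weightdecay=0.01)
trainer.trainEpochs(10)  # epoch count is an assumption
NetworkWriter.writeToFile(fnn, 'oliv.xml')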
Code Example #13
# X and y are prepared earlier in the source; here y is converted
# from a pandas Series to a plain array
y = pd.Series.as_matrix(y)
#print "datatypes", type(X), type(y)

ds = ClassificationDataSet(13, 1, nb_classes=2)
for i in range(len(X)):
    ds.addSample(X[i], y[i])

# since X has 13 columns, the input layer of the neural network has 13 nodes
#ds = pyd.ClassificationDataSet(13, 1, nb_classes=2)  #, class_labels=['yes','no'])
#print ds
# ravel is used for ordering and joining into a 1-D array
#for i in range(len(X)):
#    ds.addSample(X[i], y[i])

# splitting data into test and train
tstdata, trndata = ds.splitWithProportion(0.30)
# convert the single class column into 2 binary outputs, one neuron per class
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
print trndata['input'], trndata['target'], tstdata.indim, tstdata.outdim

# _convertToOneOfMany stores the original target values in the 'class' field,
# preserving them
print trndata['class']
# new values of the target after conversion
print trndata['target']

fnn = buildNetwork(trndata.indim, trndata.outdim, outclass=SoftmaxLayer)
#The learning rate gives the ratio by which parameters are changed in the direction of the gradient.
#The learning rate decreases by lrdecay, which multiplies the learning rate after each training step.
#The parameters are also adjusted with respect to momentum, the ratio by which the gradient of the
#last timestep is reused.
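
The comment above describes BackpropTrainer's learningrate, lrdecay, and momentum arguments; a short illustrative call (the specific values here are assumptions, not taken from the original):

trainer = BackpropTrainer(fnn, dataset=trndata, learningrate=0.01,
                          lrdecay=1.0, momentum=0.1, verbose=True)
trainer.trainEpochs(5)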