Code Example #1
	def __init__(self, histogram_list):
		self.net = buildNetwork(1024, 100, 1)

		ds = SupervisedDataSet(1024, 1)
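		# 1024-dimensional histogram inputs, one binary target: 1 = face, 0 = random noise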

		for histogram in histogram_list:
			#print (histogram)			
			ds.addSample(histogram, (1,))
		
		for x in range(0,15):
			ds.addSample(numpy.random.random((1024)) * 255, (0,)) # this noise should never be a face
			#print (numpy.random.random((1024)) * 255)
		
		trainer = BackpropTrainer(self.net, ds)

		#trainer.trainUntilConvergence()

		
		for x in range(2000):
			print ("count:\t" + str(x) + "\terror:\t" + str(trainer.train()))
			#trainer.train()
		
		print (trainer.train())

		"""
Code Example #2
def move_function(board):
    global net  
    best_max_move = None 
    max_value = -1000
    best_min_move = None
    min_value = 1000

    #value is the chance of black winning
    for m in board.get_moves():
        nextboard = board.peek_move(m)
        value = net.activate(board_to_input(nextboard))
        if value > max_value: 
            max_value = value
            best_max_move = m 
        if value < min_value:
            min_value = value
            best_min_move = m

    ds = SupervisedDataSet(97, 1)
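    # one-sample dataset: the current 97-feature position paired with the value of its best move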
    best_move = None 

    #active player
    if board.active == BLACK:
        ds.addSample(board_to_input(board), max_value)
        best_move = best_max_move
    elif board.active == WHITE: 
        ds.addSample(board_to_input(board), min_value)
        best_move = best_min_move

    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml') 
    return best_move 
Code Example #3
 def neuralNetwork_eval_func(self, chromosome):
     node_num, learning_rate, window_size = self.decode_chromosome(chromosome)
     if self.check_log(node_num, learning_rate, window_size):
         return self.get_means_from_log(node_num, learning_rate, window_size)[0]
     folded_dataset = self.create_folded_dataset(window_size)
     indim = 21 * (2 * window_size + 1)
     mean_AUC = 0
     mean_decision_value = 0
     mean_mcc = 0
     sample_size_over_thousand_flag = False
     for test_fold in xrange(self.fold):
         test_labels, test_dataset, train_labels, train_dataset = folded_dataset.get_test_and_training_dataset(test_fold)
         if len(test_labels) + len(train_labels) > 1000:
             sample_size_over_thousand_flag = True
         ds = SupervisedDataSet(indim, 1)
         for i in xrange(len(train_labels)):
             ds.appendLinked(train_dataset[i], [train_labels[i]])
         net = buildNetwork(indim, node_num, 1, outclass=SigmoidLayer, bias=True)
         trainer = BackpropTrainer(net, ds, learningrate=learning_rate)
         trainer.trainUntilConvergence(maxEpochs=self.maxEpochs_for_trainer)
         decision_values = [net.activate(test_dataset[i]) for i in xrange(len(test_labels))]
         decision_values = map(lambda x: x[0], decision_values)
         AUC, decision_value_and_max_mcc = validate_performance.calculate_AUC(decision_values, test_labels)
         mean_AUC += AUC
         mean_decision_value += decision_value_and_max_mcc[0]
         mean_mcc += decision_value_and_max_mcc[1]
         if sample_size_over_thousand_flag:
             break
     if not sample_size_over_thousand_flag:
         mean_AUC /= self.fold
         mean_decision_value /= self.fold
         mean_mcc /= self.fold
     self.write_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     self.add_log(node_num, learning_rate, window_size, mean_AUC, mean_decision_value, mean_mcc)
     return mean_AUC
Code Example #4
File: entrenar.py Project: Taberu/despierta
def entrenarSomnolencia(red):
    #Initialize the dataset
    ds = SupervisedDataSet(4096,1)

    """Se crea el dataset, para ello procesamos cada una de las imagenes obteniendo los rostros,
       luego se le asignan los valores deseados del resultado la red neuronal."""

    print "Somnolencia - cara"
    for i,c in enumerate(os.listdir(os.path.dirname('/home/taberu/Imágenes/img_tesis/somnoliento/'))):
        try:
            im = cv2.imread('/home/taberu/Imágenes/img_tesis/somnoliento/'+c)
            pim = pi.procesarImagen(im)
            cara = d.deteccionFacial(pim)
            if cara is None:
                print "No face found"
            else:
                print i
                ds.appendLinked(cara.flatten(),10)
        except:
            pass

    trainer = BackpropTrainer(red, ds)
    print "Entrenando hasta converger"
    trainer.trainUntilConvergence()
    NetworkWriter.writeToFile(red, 'rna_somnolencia.xml')
Code Example #5
def main():
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	test_set = test.keys()  # 'test' is assumed to be defined elsewhere (dict of held-out samples)
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
Code Example #6
File: nn.py Project: PascalSteger/aqualytic
def NeuralNetwork(tRiver, qRiver, pRiver, TRiver, qnewRiver, pnewRiver, TnewRiver):
    # build neural network with 20 neurons for historic data on flux, 3 for last 3 temp data, 3 for last precipitation,
    # hidden layer with more than input neurons (hinder specification)
    # and 3 output neurons (flux for next day, first derivative, second derivative

    Ndim = 10+3+3
    Nout = 3
    net = buildNetwork(Ndim, Ndim, Nout, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(Ndim, Nout)

    # next big job: find data values to build up library of training set
    for t in range(20, len(tRiver)-3):
        input_flow = qRiver[t-20:t:2]   # every 2nd of the last 20 flow values -> 10 inputs
        input_prec = pRiver[t-3:t]
        input_temp = TRiver[t-3:t]
        input_vec = np.hstack([input_flow, input_prec, input_temp])

        output_flow = np.hstack([qRiver[t:t+3]]) # first approx, split later for long predictions
        ds.addSample(input_vec, output_flow)

    trainer = BackpropTrainer(net, ds)
    #trainer.train()
    trainer.trainUntilConvergence()

    # now call it repeatedly on the second set

    prediction = net.activate(np.hstack([qnewRiver[:20:2], pnewRiver[:3], TnewRiver[:3]]))  # subsample flow to 10 values so the input matches Ndim
    return prediction
Code Example #7
File: evaluation.py Project: sethmccammon/rob537
def getErrorPercent(training_dataset, eval_dataset_list, num_hidden, num_epochs):
  num_datapoints = len(training_dataset)
  num_inputs = len(training_dataset[0][0])
  num_outputs = len(training_dataset[0][1])

  # print "Num Inputs:", num_inputs
  # print "Num Outputs:", num_outputs
  # print "Num Hidden Nodes:", num_hidden

  NN = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

  dataset = SupervisedDataSet(num_inputs, num_outputs)
  for datapoint in training_dataset:
    dataset.addSample(datapoint[0], datapoint[1])


  trainer = BackpropTrainer(NN, dataset=dataset, momentum=0.0, verbose=False, weightdecay=0.0)

  for epoch in range(0, num_epochs):
    #print epoch 
    trainer.train()

  errors = []
  for eval_set in eval_dataset_list:
    total_percent_errors = [0]*num_outputs
    for jj in range(0, len(eval_set)):
      nn_out = NN.activate(eval_set[jj][0])
      percent_error = computeError(eval_set[jj][1], nn_out)
      #print percent_error
      total_percent_errors = map(operator.add, percent_error, total_percent_errors)
    #print total_percent_errors
    errors.append(map(operator.div, total_percent_errors, [len(eval_set)]*num_outputs))  # average the accumulated errors over the evaluation set
  #print errors
  return errors
Code Example #8
File: neural.py Project: crcollins/ML
class NeuralNet(object):
    def __init__(self, layers):
        self.layers = layers
        self.ds = None
        self.train_error = []
        self.test_error = []
        self.norm_error = []

    def improve(self, n=10):
        trainer = BackpropTrainer(self.nn, self.ds)
        for i in xrange(n):
            self.train_error.append(trainer.train())

    def fit(self, X, y):
        self.nn = buildNetwork(*self.layers, bias=True, hiddenclass=SigmoidLayer)

        self.ds = SupervisedDataSet(self.layers[0], self.layers[-1])
        for i, row in enumerate(X):
            self.ds.addSample(row.tolist(), y[i])
        self.improve()

    def predict(self, X):
        r = []
        for row in X.tolist():
            r.append(self.nn.activate(row))
        return numpy.array(r)
Code Example #9
File: trainNN.py Project: onidzelskyi/VSN
def loadDataSet(ds_file):
  global X, Y
  BB = set()
  aaa = {}
  ds = SupervisedDataSet(400, 10)
  #ds = SupervisedDataSet(1024, 5)
  with open(ds_file,"rb") as f:
    lines = f.readlines()
    for line in lines:
      l = [float(a) for a in line.strip().split(',')]
      #A = [float(1.0)] + l[:-1]
      A = l[:-1]
      X.append(A)
      B = int(l[-1])
      #BB.update([B])
      #for aa,bb in enumerate(BB):
      #  aaa[bb] = aa
      #print aaa
      #Y.append(aaa[bb])
      Y.append(B)
      C = []
      for i in range(10):
        C.append(int(1) if i==B or (i==0 and B==10) else int(0))
      ds.addSample(tuple(A), tuple(C))
  return ds
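
A possible way to consume this loader (a sketch: the file name 'digits.csv' and the 400-25-10 topology are illustrative assumptions, not taken from the project):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

X, Y = [], []                      # module-level lists that loadDataSet fills via 'global X, Y'
ds = loadDataSet('digits.csv')     # hypothetical CSV: 400 features plus a class label per row
net = buildNetwork(400, 25, 10)    # one output unit per class
trainer = BackpropTrainer(net, ds)
for epoch in range(20):
    print(trainer.train())         # per-epoch training error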
Code Example #10
File: nnPredict.py Project: lovelyhe/Contest
def getModel(inputSize,hiddenSize1,hiddenSize2,trainData,target):
	fnn = FeedForwardNetwork()
	inLayer = LinearLayer(inputSize,name = 'inLayer')
	hiddenLayer0 = SigmoidLayer(hiddenSize1,name='hiddenLayer0')
	hiddenLayer1 = SigmoidLayer(hiddenSize2,name='hiddenLayer1')
	outLayer = LinearLayer(1,name = 'outLayer')

	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer0)
	fnn.addModule(hiddenLayer1)
	fnn.addOutputModule(outLayer)

	inToHidden0 = FullConnection(inLayer,hiddenLayer0)
	hidden0ToHidden1 = FullConnection(hiddenLayer0,hiddenLayer1)
	hidden1ToHiddenOutput = FullConnection(hiddenLayer1,outLayer)

	fnn.addConnection(inToHidden0)
	fnn.addConnection(hidden0ToHidden1)
	fnn.addConnection(hidden1ToHiddenOutput)

	fnn.sortModules()
	Ds = SupervisedDataSet(inputSize,1)
	scaler = preprocessing.StandardScaler().fit(trainData)
	x = scaler.transform(trainData)
	# print(len(target))
	# print(len(x))
	for i in range(len(target)):
		Ds.addSample(x[i],[target[i]])
	trainer = BackpropTrainer(fnn,Ds,learningrate=0.01,verbose=False)
	trainer.trainUntilConvergence(maxEpochs=1000)
	return fnn
Code Example #11
class Brain:
	def __init__(self, hiddenNodes = 30):
		# construct neural network 
		self.myClassifierNet = buildNetwork(12, hiddenNodes, 1, bias=True, hiddenclass=TanhLayer) #parameters to buildNetwork are inputs, hidden, output
		# set up dataset
		self.myDataset = SupervisedDataSet(12, 1)
		self.myClassifierTrainer = BackpropTrainer(self.myClassifierNet, self.myDataset)

	def addSampleImageFromFile(self, imageFile, groupId):
		"adds a data sample from an image file, including needed processing"
		myImage = Image.open(imageFile)
		self.myDataset.addSample(twelveToneParallel(myImage), (groupId,))

	def train(self):
		#myClassifierTrainer.trainUntilConvergence() #this will take forever (possibly literally in the pathological case)
		for i in range(0, 15):
			self.myClassifierTrainer.train() #this may result in an inferior network, but in practice seems to work fine

	def save(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'wb')  # binary mode for pickle
		pickle.dump(self.myClassifierNet, saveFile)
		saveFile.close()

	def load(self, saveFileName="recognizernet.brain"):
		saveFile = open(saveFileName, 'rb')
		self.myClassifierNet = pickle.load(saveFile)  # assign to self so the loaded network replaces the current one
		saveFile.close()

	def classify(self, fileName):
		myImage = Image.open(fileName)
		if self.myClassifierNet.activate(twelveToneParallel(myImage)) < 0.5:
			return 0
		else:
			return 1
Code Example #12
File: glaive.py Project: peixian/Ultralisk
def createNet():
	"""Create and seed the intial neural network"""
	#CONSTANTS
	nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
	nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]

	allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()

	ds = SupervisedDataSet(nn_input_dim, nn_output_dim)

	#normalizes and adds it to the dataset
	for i in range(0, len(allyTrainingPos)):
		x = normalize(enemyTrainingPos[i])
		y = normalize(allyTrainingPos[i])
		x = [val for pair in x for val in pair]
		y = [val for pair in y for val in pair]
		ds.addSample(x, y)

	for inpt, target in ds:
		print inpt, target

	net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
	trainer = BackpropTrainer(net, ds)
	trainer.trainUntilConvergence()
	NetworkWriter.writeToFile(net, "net.xml")
	enemyTestPos = runExperiments.makeTestDataset()
	print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
	return ds
Code Example #13
File: eunfq.py Project: myeaton1/euphoriaAI
    def learn(self):
        # convert reinforcement dataset to NFQ supervised dataset
        supervised = SupervisedDataSet(self.module.network.indim, 1)

        for seq in self.dataset:
            lastexperience = None
            for state, action, reward in seq:
                if not lastexperience:
                    # delay each experience in sequence by one
                    lastexperience = (state, action, reward)
                    continue

                # use experience from last timestep to do Q update
                (state_, action_, reward_) = lastexperience

                Q = self.module.getValue(state_, action_[0])

                inp = r_[state_, one_to_n(action_[0], self.module.numActions)]
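                # TD-style target: step Q halfway (factor 0.5) toward reward_ + gamma * max_a Q(state, a)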
                tgt = Q + 0.5*(reward_ + self.gamma * max(self.module.getActionValues(state)) - Q)
                supervised.addSample(inp, tgt)

                # update last experience with current one
                lastexperience = (state, action, reward)

        # train module with backprop/rprop on dataset
        trainer = RPropMinusTrainer(self.module.network, dataset=supervised, batchlearning=True, verbose=False)
        trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
Code Example #14
File: rep.py Project: kevcal69/thesis
def trainDataSet():
    cases = Case.objects.exclude(geocode__isnull=True, geocode__grid=-1)

    print "Data Representation"
    ds = SupervisedDataSet(5245, 5245)
    for w in xrange(0,52):
        print "Start week w",
        dataset_input = [0 for i in xrange(0,5245)]
        dataset_output = [0 for i in xrange(0,5245)]
        for i in xrange(0,5245):
            dataset_input[i] = cases.filter(geocode__grid=i, morbidity__week=w).count()
            dataset_output[i] = 1 if (cases.filter(geocode__grid=i, morbidity__week=w+1).count() > 0 or cases.filter(geocode__grid=i, morbidity__week=w+2).count() > 0) else 0
        ds.addSample( (dataset_input), (dataset_output))
        print " - done week w"
    # tstdata, trndata = ds.splitWithProportion(0.25)
    print "Train"
    net = buildNetwork( 5245, 1000, 5245, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=0.1, momentum=0.99)

    terrors = trainer.trainUntilConvergence(verbose = None, validationProportion = 0.33, maxEpochs = 1000, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("data.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 5245):
            print math.floor(output[q]), math.floor(expectedOutput[q])
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = float(count)/5245  # float division avoids Python 2 integer floor
        fo.write("{0} ::  {1}\n".format(count, m))
Code Example #15
File: rep.py Project: kevcal69/thesis
def run_data():
    with open('new_data2.txt') as data_file:
        data = json.load(data_file)
    ds = SupervisedDataSet(1316, 1316)
    for i in xrange(0, 51):
        print "Adding {}th data sample".format(i),
        input = tuple(data[str(i)]['input'])
        output = tuple(data[str(i)]['output'])        
        # print len(input), len(output)
        ds.addSample( input, output)
        print ":: Done"

    print "Train"
    net = buildNetwork( 1316, 100, 1316, bias=True, )
    trainer = BackpropTrainer(net, ds)

    terrors = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.33, maxEpochs = 20, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("results2.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 1316):
            print output[q], expectedOutput[q]
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = float(count)/1316.00
        print "{0} ::  {1}".format(count, m)
        fo.write("{0} ::  {1}\n".format(count, m))
Code Example #16
File: BPnet.py Project: hyf9011/robotcat
def getDS(il,ol,trainData):
	DS = SupervisedDataSet(il,ol)
	# add sample points to the dataset
	r = trainData.shape[0]
	for i in xrange(0,r):  # include every row of the training data
		DS.addSample(trainData[i,:il],trainData[i,il])
	return DS
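
A usage sketch for getDS (the array shapes and network size here are assumptions, not taken from the project):

import numpy as np
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

trainData = np.random.rand(50, 4)     # hypothetical: columns 0-2 are features, column 3 is the target
DS = getDS(3, 1, trainData)
net = buildNetwork(3, 5, 1, bias=True)
BackpropTrainer(net, DS).trainEpochs(10)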
Code Example #17
File: dataset.py Project: bryanmoore4/ANN_project
class dataset:
    # Initialize the dataset with input and label size
    def __init__(self, inputsize, labelsize):
        self.inputsize = inputsize
        self.labelsize = labelsize
        self.DS = SupervisedDataSet(self.inputsize, self.labelsize)
    
    # Adds data to existing training dataset
    def addTrainingData(self,inputdata, labeldata):
        try:
            if inputdata.size == self.inputsize and labeldata.size == self.labelsize:
                self.DS.appendLinked(inputdata, labeldata)
                return 1
        except AttributeError:
            print "Input error."
            return 0
    
    def getTrainingDataset(self):
        return self.DS
    
    def generateDataSet(self):
        for line in fileinput.input(['data/inputdata3.txt']):
            x = line.split(':')
#            print ft.feature.getImageFeatureVector(x[0]),np.array([int(x[1])])
            self.addTrainingData(ft.feature.getImageFeatureVector(x[0]),np.array([int(x[1])]))
        return 1
Code Example #18
File: pybrain.py Project: AlexanderTek/rep
    def _prepare_dataset(self, X, y, model_type):
        X, y, sample_weight = check_inputs(X, y, sample_weight=None, allow_none_weights=True,
                                           allow_multiple_targets=model_type == 'regression')
        X = self._transform_data(X, y, fit=not self.is_fitted())

        if model_type == 'classification':
            if not self.is_fitted():
                self._set_classes(y)
            target = one_hot_transform(y, n_classes=len(self.classes_))
        elif model_type == 'regression':
            if len(y.shape) == 1:
                target = y.reshape((len(y), 1))
            else:
                # multi regression
                target = y

            if not self.is_fitted():
                self.n_targets = target.shape[1]
        else:
            raise ValueError('Wrong model type')

        dataset = SupervisedDataSet(X.shape[1], target.shape[1])
        dataset.setField('input', X)
        dataset.setField('target', target)

        return dataset
Code Example #19
File: suite.py Project: andrewmalta13/nupic.research
  def train(self, params):
    """
    Train TDNN network on buffered dataset history
    :param params:
    :return:
    """
    # self.net = buildNetwork(params['encoding_num'] * params['num_lags'],
    #                         params['num_cells'],
    #                         params['encoding_num'],
    #                         bias=True,
    #                         outputbias=True)

    ds = SupervisedDataSet(params['encoding_num'] * params['num_lags'],
                           params['encoding_num'])
    history = self.window(self.history, params['learning_window'])

    n = params['encoding_num']
    for i in xrange(params['num_lags'], len(history)):
      targets = numpy.zeros((1, n))
      targets[0, :] = self.encoder.encode(history[i])

      features = numpy.zeros((1, n * params['num_lags']))
      for lags in xrange(params['num_lags']):
        features[0, lags * n:(lags + 1) * n] = self.encoder.encode(
          history[i - (lags + 1)])
      ds.addSample(features, targets)

    trainer = BackpropTrainer(self.net,
                              dataset=ds,
                              verbose=params['verbosity'] > 0)

    if len(history) > 1:
      trainer.trainEpochs(params['num_epochs'])
Code Example #20
def createBetterSupervisedDataSet(input_file):
	print "Creating a BETTER supervised dataset from", input_file

	ds = SupervisedDataSet(nFeatures, 1)
	answers_by_question = {}

	try:
		with open(training_data_pickle_name, 'rb') as p:
			print "Loading from pickle"
			answers_by_question = pickle.load(p)
			print "Load successful"
			print "Size of answers_by_question:", len(answers_by_question.keys())

	except IOError:
		answers_by_question = loadAnswersByQuestion(input_file)

		print "Saving to a pickle..."
		with open(training_data_pickle_name, 'wb') as p:
			pickle.dump(answers_by_question, p)
		print "Saved to", training_data_pickle_name

	# loop to load stuff into ds
	for qid in answers_by_question:
		for aid in answers_by_question[qid]:
			if aid != 'info':
				ds.addSample( tuple(answers_by_question[qid][aid]['data']), (answers_by_question[qid][aid]['target'], ) )
				# ds.addSample(tuple(ans[1]), (ans[0],))

	return ds, answers_by_question
Code Example #21
def build_dataset():
    ds = SupervisedDataSet(2, 1)
    ds.addSample((0, 0), (0,))
    ds.addSample((0, 1), (1,))
    ds.addSample((1, 0), (1,))
    ds.addSample((1, 1), (0,))
    return ds
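
A minimal way to exercise this XOR truth table (a sketch: the 2-4-1 topology and epoch count are illustrative choices, and the standard pybrain imports are assumed):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(2, 4, 1, bias=True)
trainer = BackpropTrainer(net, build_dataset())
for epoch in range(1000):      # plain train() loop, as in several snippets above
    trainer.train()
print(net.activate((0, 1)))   # should move toward 1.0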
Code Example #22
    def initializeNetwork(self):
        can1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can1.png'), self.encodingDict["can"])
        can2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can2.png'), self.encodingDict["can"])
        can3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can3.png'), self.encodingDict["can"])
        stain1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain1.png'), self.encodingDict["stain"])
        stain2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain2.png'), self.encodingDict["stain"])
        stain3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain3.png'), self.encodingDict["stain"])
        dirt1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt1.png'), self.encodingDict["dirt"])
        dirt2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt2.png'), self.encodingDict["dirt"])
        dirt3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt3.png'), self.encodingDict["dirt"])

        self.trainData.append(can1)
        self.trainData.append(can2)
        self.trainData.append(can3)
        self.trainData.append(stain1)
        self.trainData.append(stain2)
        self.trainData.append(stain3)
        self.trainData.append(dirt1)
        self.trainData.append(dirt2)
        self.trainData.append(dirt3)

        for x in self.trainData:
            x.prepareTrainData()

        self.net = buildNetwork(4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
        ds = SupervisedDataSet(4, 3)

        for x in self.trainData:
            ds.addSample((x.contours/100.0, x.color[0]/1000.0, x.color[1]/1000.0, x.color[2]/1000.0), x.output)

        trainer = BackpropTrainer(self.net, momentum=0.1, verbose=True, weightdecay=0.01)
        trainer.trainOnDataset(ds, 1000)
        trainer.testOnData(verbose=True)
        print "\nSiec nauczona\n"
Code Example #23
File: neuralkinect.py Project: kepplemr/neuralKinect
class NeuralKinect():
    def __init__(self):
        # Softmax layer -> great for classification networks
        #self.neuralNet = buildNetwork(60, 60, 5, outclass=SoftmaxLayer)
        #self.neuralNet = buildNetwork(60, 60, 5, hiddenclass=TanhLayer)
        #self.neuralNet = buildNetwork(60, 60, 5, bias=True)
        self.neuralNet = buildNetwork(60, 60, 5)
        self.dataSet = SupervisedDataSet(60, 5)

    def trainBackProp(self):
        trainer = BackpropTrainer(self.neuralNet, self.dataSet)
        start = time.time()
        trainer.trainEpochs(EPOCHS)
        end = time.time()
        print("Training time -> " + repr(end-start))
        print(repr(trainer.train()))

    def loadDataSet(self):
        points = []
        for csvFile in glob.iglob("TrainData/*.csv"):
            with open(csvFile, 'rt') as letterSet:
                reader = csv.reader(letterSet)
                header = str(reader.next())
                letter = header[2:3]
                targetStr = header[4:9]
                print("Processing Dataset for letter -> " + letter)
                target = []
                for digit in targetStr:
                    target.append(digit)
                rows = 1
                for row in reader:              
                    for col in row:
                        points.append(col)
                    if rows % 20 == 0:
                        self.dataSet.addSample(points, target)
                        points = []
                    rows += 1
                    
    def processResults(self, output):
        result = ""
        for digit in output:
            if digit > 0.5:
                result += "1"
            else:
                result += "0"
        print("Network result -> " + chr(64+int(result,2)))
                    
    def testNetwork(self):
        points = []
        for csvFile in glob.iglob("TestData/*.csv"):
            with open(csvFile, 'rt') as testPose:
                reader = csv.reader(testPose)
                rows = 1
                for row in reader:
                    for col in row:
                        points.append(col)
                    if rows % 20 == 0:
                        self.processResults(self.neuralNet.activate(points))
                        points = []
                    rows += 1
Code Example #24
def get_dataset_for_pybrain_regression(X,y):
	ds = SupervisedDataSet(250,1)
	tuples_X = [tuple(map(float,tuple(x))) for x in X.values]
	tuples_y = [tuple(map(float,(y,))) for y in y.values]
	for X,y in zip(tuples_X,tuples_y):
		ds.addSample(X,y)
	return ds
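
get_dataset_for_pybrain_regression reads X.values and y.values, so it expects pandas-style objects with 250 feature columns; a usage sketch under that assumption (the frame contents and network size are hypothetical):

import numpy as np
import pandas as pd
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

X = pd.DataFrame(np.random.rand(100, 250))   # hypothetical feature frame
y = pd.Series(np.random.rand(100))           # hypothetical regression target
ds = get_dataset_for_pybrain_regression(X, y)
net = buildNetwork(250, 10, 1, bias=True)
BackpropTrainer(net, ds).trainUntilConvergence(maxEpochs=50)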
Code Example #25
File: neuralbrain.py Project: 0x1001/jarvis
    def learn(self,dataset):
        """
            This function trains network

            Input:
            dataset     - Dataset to train network

            Returns:
            Nothing
        """
        from pybrain.supervised.trainers import BackpropTrainer
        from pybrain.datasets import SupervisedDataSet
        from neuraltrainer import NeuralTrainer

        if self._net is None: raise NeuralBrainException("Brain is not configured!")
        if dataset == {}: raise NeuralBrainException("Dataset for learning is empty.")

        data = SupervisedDataSet(self._input,self._output)
        for input,output in dataset.items():
            input = self._normalize(input,self._input)
            output = self._normalize(output,self._output)
            data.addSample(input,output)
            data.addSample(input,output)# For better learning 2x

        trainer = NeuralTrainer(self._net, data)
        trainer.simpleTrain()
Code Example #26
def convertDataNeuralNetwork(x, y):
	data = SupervisedDataSet(x.shape[1], 1)
	for xIns, yIns in zip(x, y):
		data.addSample(xIns, yIns)
	return data

def NN(xTrain, yTrain, xTest, yTest):
	trainData = convertDataNeuralNetwork(xTrain, yTrain)
	testData = convertDataNeuralNetwork(xTest, yTest)
	fnn = FeedForwardNetwork()
	inLayer = SigmoidLayer(trainData.indim)
	hiddenLayer = SigmoidLayer(5)
	outLayer = LinearLayer(trainData.outdim)
	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer)
	fnn.addOutputModule(outLayer)
	in_to_hidden = FullConnection(inLayer, hiddenLayer)
	hidden_to_out = FullConnection(hiddenLayer, outLayer)
	fnn.addConnection(in_to_hidden)
	fnn.addConnection(hidden_to_out)
	fnn.sortModules()
	trainer = BackpropTrainer(fnn, dataset = trainData, momentum = 0.1, verbose = True, weightdecay = 0.01)

	for i in xrange(10):
	    trainer.trainEpochs(500)
	    
	rmse = percentError(trainer.testOnClassData(dataset = testData), yTest)  # note: percentError is a classification error rate, not a true RMSE
	return rmse/100

def main():
	rmse = NN(xTrain, yTrain, xTest, yTest)
	print rmse

if __name__=="__main__":
	main()
Code Example #27
def get_train_samples(input_num,output_num):
    '''
    Read images from the new_samples folder and build samples from the given input/output sizes;
    each original sample gets 100 noisy copies generated by random bit flips.
    '''
    print 'getsample start.'
    sam_path='./new_samples'
    samples = SupervisedDataSet(input_num,output_num)
    nlist = os.listdir(sam_path)
    t=int(np.sqrt(input_num))
    for n in nlist:
        file = os.path.join(sam_path,n)
        im = Image.open(file)
        im = im.convert('L')
        im = im.resize((t,t),Image.BILINEAR)
        buf = np.array(im).reshape(input_num,1)
        buf = buf<200
        buf = tuple(buf)
        buf1=int(n.split('.')[0])
        buf2=range(output_num)
        for i in range(len(buf2)):
            buf2[i] = 0
        buf2[buf1]=1
        buf2 = tuple(buf2)
        samples.addSample(buf,buf2)
        for i in range(100):
            buf3 = list(buf)
            for j in range(len(buf)/20):
                buf3[np.random.randint(len(buf))] = bool(np.random.randint(2))
            samples.addSample(tuple(buf3),buf2)
    return samples 
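
A usage sketch for get_train_samples (sizes are assumptions: 400 inputs implies the images are resized to 20x20, and the hidden-layer width is arbitrary):

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

samples = get_train_samples(400, 10)        # 20x20 binarized digits, 10 output classes
net = buildNetwork(400, 40, 10, bias=True)
trainer = BackpropTrainer(net, samples)
for epoch in range(10):
    print(trainer.train())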
Code Example #28
File: problem3_predict.py Project: eprym/EE-239AS
def neural_network(data, target, network):
    DS = SupervisedDataSet(len(data[0]), 1)
    nn = buildNetwork(len(data[0]), 7, 1, bias = True)
    kf = KFold(len(target), 10, shuffle=True)
    RMSE_NN = []
    for train_index, test_index in kf:
        data_train, data_test = data[train_index], data[test_index]
        target_train, target_test = target[train_index], target[test_index]
        for d,t in zip(data_train, target_train):
            DS.addSample(d, t)
        bpTrain = BackpropTrainer(nn,DS, verbose = True)
        #bpTrain.train()
        bpTrain.trainUntilConvergence(maxEpochs = 10)
        p = []
        for d_test in data_test:
            p.append(nn.activate(d_test))
        
        rmse_nn = sqrt(np.mean((p - target_test)**2))
        RMSE_NN.append(rmse_nn)
        DS.clear()
    time = range(1,11)
    plt.figure()
    plt.plot(time, RMSE_NN)
    plt.xlabel('cross-validation time')
    plt.ylabel('RMSE')
    plt.show()
    print(np.mean(RMSE_NN))
Code Example #29
File: gnn_pybrain.py Project: jpodeszwik/openai
    def run_try(self, rand_chance=0, rand_count=0, rand_count_ref=0, render=False):
        ds = SupervisedDataSet(env_size, 1)
        observation = env.reset()

        random_indexes = []

        while len(random_indexes) < rand_count:
            random_index = math.floor(random() * rand_count_ref)
            if random_index not in random_indexes:
                random_indexes.append(random_index)

        for t in range(max_frames):
            if render:
                env.render()
            # print(observation)

            action = 0 if net.activate(observation)[0] < 0 else 1

            if t in random_indexes or random() < rand_chance:
                action = (action + 1) % 2  # flip the action between 0 and 1

            ds.addSample(observation, (action,))
            observation, reward, done, info = env.step(action)

            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break

        if t == max_frames - 1:
            print("Passed!!")
            self.run_try(render=True)

        return t, ds
Code Example #30
    def _set_dataset(self, trn_index, tst_index):
        '''
        set the dataset according to the index of the training data and the test data
        Then do feature normalization
        '''
        this_trn = self.tot_descs[trn_index]
        this_tst = self.tot_descs[tst_index]
        this_trn_target = self.tot_target[trn_index]
        this_tst_target = self.tot_target[tst_index]
        
        # get the normalizer
        trn_normalizer = self._getNormalizer(this_trn)
        
        # feature normal and target log for traning data
        trn_normed = self._featureNorm(this_trn, trn_normalizer)
        trn_log_tar = np.log(this_trn_target)
        
        # feature normalization for the test data, with the normalizer of the training data
        tst_normed = self._featureNorm(this_tst, trn_normalizer)
        tst_log_tar = np.log(this_tst_target)
        
        trn_ds_ann = SupervisedDataSet(self.indim, self.outdim)
        trn_ds_ann.setField('input', trn_normed)
        trn_log_tar = trn_log_tar.reshape((trn_log_tar.shape[0],1))
        trn_ds_ann.setField('target', trn_log_tar)
        
        tst_ds_ann = SupervisedDataSet(self.indim, self.outdim)
        tst_ds_ann.setField('input', tst_normed)
        tst_log_tar = tst_log_tar.reshape((tst_log_tar.shape[0],1))
        tst_ds_ann.setField('target', tst_log_tar)

        return trn_ds_ann, tst_ds_ann
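
Note the bulk-loading pattern used here: setField('input', ...) and setField('target', ...) install whole 2-D arrays in one call instead of looping over addSample, as long as the column counts match indim and outdim. A minimal sketch of the same pattern with made-up shapes:

import numpy as np
from pybrain.datasets import SupervisedDataSet

ds = SupervisedDataSet(3, 1)
ds.setField('input', np.random.rand(100, 3))    # all 100 inputs at once
ds.setField('target', np.random.rand(100, 1))   # target must be 2-D: (n_samples, outdim)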
Code Example #31
arr_val = np.concatenate((arr1[12:24], arr2[12:24]), axis=0)

nEpochs = 200
#We will do all 4 types, with data replication
noise_list = [0.03, 0.05]

#add noise
length_data = len(arr_test)

#the four sets are given separately
set_lengths = ((0, 12), (12, 24), (24, 36), (36, 48), (48, 60))
#third set is separated
#an enhanced array that will contain the noisy data
enh_array = np.empty((1, 6))  #np.zeros((38,5))
#build and populate datasets
ds2 = SupervisedDataSet(5, 3)
for noise in noise_list:
    enh_array = np.empty((1, 6))  #np.zeros((38,5))
    #augment the enhanced array
    for i, m in product(set_lengths, xrange(100)):
        #i is the dataset
        #for each dataset we produce a 100 noise_arrays
        temp_data = arr_test[i[0]:i[1]]
        #generate an array the size of the current sub array, to be added.
        noise_array = np.array([[temp_data[k, 0]] + [temp_data[k, 1]] + [
            temp_data[k, j] +
            temp_data[k, j] * np.random.uniform(-noise, noise)
            for j in xrange(2, 6)
        ] for k in xrange(len(temp_data))])
        enh_array = np.concatenate((enh_array, noise_array), axis=0)
Code Example #32
""" A simple example on how to use the GaussianProcess class
in pybrain, for one and two dimensions. """

__author__ = "Thomas Rueckstiess, [email protected]"

from pybrain.auxiliary import GaussianProcess
from pybrain.datasets import SupervisedDataSet
from scipy import mgrid, sin, cos, array, ravel
from pylab import show, figure

ds = SupervisedDataSet(1, 1)
gp = GaussianProcess(indim=1, start=-3, stop=3, step=0.05)    
figure()

x = mgrid[-3:3:0.2]
y = 0.1*x**2 + x + 1
z = sin(x) + 0.5*cos(y)

ds.addSample(-2.5, -1)
ds.addSample(-1.0, 3)
gp.mean = 0

# new feature "autonoise" adds uncertainty to data depending on
# it's distance to other points in the dataset. not tested much yet.
# gp.autonoise = True

gp.trainOnDataset(ds)
gp.plotCurves(showSamples=True) 

# you can also test the gp on single points, but this deletes the
# original testing grid. it can be restored with a call to _buildGrid()
Code Example #33
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

neuralNetwork = buildNetwork(2, 3, 1, bias=True)

dataset = SupervisedDataSet(2, 1)

dataset.addSample((0, 0), (0,))
dataset.addSample((1, 0), (1,))
dataset.addSample((0, 1), (1,))
dataset.addSample((1, 1), (0,))

trainer = BackpropTrainer(neuralNetwork,
                          dataset=dataset,
                          learningrate=0.01,  # BackpropTrainer expects lowercase 'learningrate'
                          momentum=0.06)

for i in range(1, 10000):
    error = trainer.train()

    if i % 1000 == 0:  # report progress every 1000 iterations
        print("Error in iteration ", i, " is: ", error)
        print(neuralNetwork.activate([0, 0]))
        print(neuralNetwork.activate([1, 0]))
        print(neuralNetwork.activate([0, 1]))
        print(neuralNetwork.activate([1, 1]))

print("\n\nFinal result of XOR:\n")
print(neuralNetwork.activate([0, 0]))
print(neuralNetwork.activate([1, 0]))
Code Example #34
    float(45),
    float(45),
    float(45),
    float(45),
    float(45),
    float(45),
    float(45),
    float(45),
    float(45)
]

maxima_saida = max(saidas)
saidas = np.matrix(saidas)
saidas = saidas / maxima_saida

parametros_entrada = SupervisedDataSet(20, 1)

i = 0
for entrada in atributos:
    parametros_entrada.addSample(entrada, [saidas[0, i]])
    i = i + 1

rede_neural = buildNetwork(20, 15, 1, bias=True)
rede_neural.randomize()
treinamento = BackpropTrainer(rede_neural, parametros_entrada, momentum=0.99)
treinamento.trainEpochs(1000)

obj = wocr("../imagens_teste/um.jpg")

atributos_teste = obj.atributos()
atributos_teste = np.matrix(atributos_teste) / maximo_entrada
Code Example #35
    # Y = scy.transform(y)
    # X.reshape(-1, 1)
    # Y.reshape(-1, 1)
    X = preprocessing.scale(x_np)
    Y = preprocessing.scale(y_np)
    ######################################
    #setup the dataset (supervised classification training) for neural network
    ######################################
    from pybrain.utilities import percentError
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain.supervised.trainers import BackpropTrainer
    from pybrain.structure.modules import SoftmaxLayer
    from pybrain.datasets.classification import ClassificationDataSet
    from pybrain.datasets import SupervisedDataSet

    ds = SupervisedDataSet(4, 1)
    for i in range(len(X)):
        ds.addSample(X[i], Y[i])
    # #split the dataset
    trainData, testData = ds.splitWithProportion(0.60)

    # ###################################
    # #Creating a Neural Network
    # ###################################
    # # build nerual net with 4 inputs, 5 hidden neuron and 1 output neuron
    net = buildNetwork(4, 5, 1, bias=True)
    trainer = BackpropTrainer(net, trainData)
    train_error = trainer.trainUntilConvergence(dataset=trainData,
                                                maxEpochs=50)

    # #evaluate the error rate on training data
Code Example #36
    occurrences = [w[1] for w in sorted_word_dict[-dict_size:]]
    print "These are the 10 most occurring words are"
    print sorted_word_dict[-9:]
    plt.plot(occurrences)
    plt.xlabel('word indices')
    plt.ylabel('occurrences')
    plt.ylim([0, 5000])
    plt.show()

######## Build training set and save to file ############
print "Saving to file..."
#PyBrain has some nice classes to do all this.
from pybrain.datasets import SupervisedDataSet
import numpy as np

DS = SupervisedDataSet(dict_size, 1)

for m_list, target in [[spamlist, 1], [hamlist, 0]]:
    for mail in m_list:
        #each data point is a list (or vector) the size of the dictionary
        wordvector = np.zeros(dict_size)
        #now go through the email and put the occurrences of each word
        #in its respective spot (i.e. word_dict[word]) in the vector
        for word in mail:
            if word in word_dict:
                wordvector[word_dict[word]] += 1
        DS.appendLinked(np.log(wordvector + 1),
                        [target])  #put word occurrences on a log scale

#TODO: use MySQL instead of csv
DS.saveToFile('dataset.csv')
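
The saved dataset can be reloaded later without recomputing the word vectors; a sketch, assuming pybrain's matching loadFromFile deserializer:

from pybrain.datasets import SupervisedDataSet

DS = SupervisedDataSet.loadFromFile('dataset.csv')
print(len(DS))    # number of (word-vector, label) pairs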
Code Example #37
class NeuralNetwork():
    def __init__(self, hidden_counts, training_list, learning_rate,
                 balance_num):
        self.training_list = training_list
        self.set_network(len(training_list[0]) - 4, hidden_counts, 1)
        self.set_trainer(learning_rate, balance_num)

    def set_network(self, in_count, hidden_counts, out_count):
        assert len(hidden_counts) > 0
        self.in_count = in_count
        self.out_count = out_count
        self.net = FeedForwardNetwork()
        in_layer = LinearLayer(in_count)
        hidden_layers = [SigmoidLayer(count) for count in hidden_counts]
        out_layer = SigmoidLayer(out_count)
        self.net.addInputModule(in_layer)
        for layer in hidden_layers:
            self.net.addModule(layer)
        self.net.addOutputModule(out_layer)
        in_connection = FullConnection(in_layer, hidden_layers[0])
        hidden_connections = [
            FullConnection(layer1, layer2)
            for layer1, layer2 in zip(hidden_layers[0:-1], hidden_layers[1:])
        ]
        out_connection = FullConnection(hidden_layers[-1], out_layer)
        self.net.addConnection(in_connection)
        for connection in hidden_connections:
            self.net.addConnection(connection)
        self.net.addConnection(out_connection)
        self.net.sortModules()

    def set_trainer(self, learning_rate, balance_num):
        self.origin_ds = SupervisedDataSet(self.in_count, self.out_count)
        self.balance_ds = SupervisedDataSet(self.in_count, self.out_count)
        for sample in self.training_list:
            feature = sample[3:-1]
            label = sample[-1]
            self.origin_ds.addSample(feature, label)
            if label == 1:
                for i in range(balance_num):
                    self.balance_ds.addSample(feature, label)
            else:
                self.balance_ds.addSample(feature, label)
        self.learning_rate = learning_rate
        self.balance_num = balance_num
        self.origin_trainer = BackpropTrainer(self.net,
                                              self.origin_ds,
                                              learningrate=learning_rate)
        self.balance_trainer = BackpropTrainer(self.net,
                                               self.balance_ds,
                                               learningrate=learning_rate)

    def origin_train(self):
        return self.origin_trainer.train()

    def pos_train(self):
        return self.pos_trainer.train()  # note: pos_trainer is never created in set_trainer

    def balance_train(self):
        return self.balance_trainer.train()

    def predicate_score(self, feature_list):
        return [self.net.activate(feature)[0] for feature in feature_list]

    def predicate_label(self, feature_list):
        score_list = self.predicate_score(feature_list)
        return [1 if score > 0.5 else 0 for score in score_list]

    def evaluate(self):
        predicate = numpy.array(self.predicate_label(self.origin_ds['input']))
        label = self.origin_ds['target'][:, 0].astype('int32')
        assert len(predicate.shape) == 1
        assert len(label.shape) == 1
        positive_true = numpy.logical_and(label, predicate).sum()
        positive_predicate = predicate.sum()
        positive_label = label.sum()
        if positive_predicate == 0: positive_predicate = 1
        if positive_label == 0: positive_label = 1
        p = float(positive_true) / positive_predicate  # float division so precision/recall are not floored to 0 in Python 2
        r = float(positive_true) / positive_label
        if p * r == 0: f1 = 0
        else: f1 = 2 * p * r / (p + r)
        return positive_true, positive_predicate, positive_label, p, r, f1
Code Example #38
#add the input, hidden and output layers to fnn
fnn.addInputModule(inLayer)
fnn.addModule(hiddenLayer)
fnn.addOutputModule(outLayer)

#create full connections between the layers
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

#register the connections with fnn
fnn.addConnection(in_to_hidden)
fnn.addConnection(hidden_to_out)
fnn.sortModules()

#initialize the supervised dataset
DS = SupervisedDataSet(x.shape[1], 1)

#add the training data and labels to DS
for i in range(len(xTrain)):
    DS.addSample(xTrain[i], yTrain[i])

#train with backprop until convergence, at most 1000 epochs
trainer = BackpropTrainer(fnn, DS, learningrate=0.01, verbose=True)
trainer.trainUntilConvergence(maxEpochs=1000)

#validate the result on the test set
values = []
for x in xTest:
    values.append(sy.inverse_transform(fnn.activate(x))[0])

#compute the RMSE (Root Mean Squared Error)
Code Example #39
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)

#inputs and the expected output (DataSet)
ds.addSample((0.8, 0.4), (0.7))
ds.addSample((0.5, 0.7), (0.5))
ds.addSample((1.0, 0.8), (0.95))
ds.addSample((0.5, 0.5), (0.6))

#create a neural network with 2 inputs, 4 hidden neurons and one output
nn = buildNetwork(2, 4, 1, bias=True)

#set up the trainer with the DataSet and the network
trainer = BackpropTrainer(nn, ds)

#train for 2000 epochs; train() returns the error
for i in xrange(2000):
    trainer.train()

#after training, feed in values and read back the output
while True:

    dormiu = float(raw_input('Slept: '))
    estudou = float(raw_input('Studied: '))

    #activate the network with the values; the result reflects the training
    z = nn.activate((dormiu, estudou))[0] * 10.0
Code Example #40
File: testbp3.py Project: bird0554/pythonforGAN
fnn.addModule(hiddenLayer0)
fnn.addOutputModule(outLayer)

# link three layers
in_to_hidden0 = FullConnection(inLayer, hiddenLayer0)
hidden0_to_out = FullConnection(hiddenLayer0, outLayer)

# add the links to neural network
fnn.addConnection(in_to_hidden0)
fnn.addConnection(hidden0_to_out)

# make neural network come into effect
fnn.sortModules()

# define the dataset as two inputs, one output
DS = SupervisedDataSet(2, 1)

# add data element to the dataset
for i in np.arange(199):
    DS.addSample([u[i], y[i]], [y[i + 1]])

# you can get your input/output this way
X = DS['input']
Y = DS['target']

# split the dataset into train dataset and test dataset
dataTrain, dataTest = DS.splitWithProportion(0.8)
xTrain, yTrain = dataTrain['input'], dataTrain['target']
xTest, yTest = dataTest['input'], dataTest['target']

# train the NN
Code Example #41
Created on Fri Sep 08 23:08:48 2017

@author: pedro
"""
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
#import matplotlib.pyplot as plt

arquivo = open('dadosteste.txt', 'r')
texto = arquivo.readlines()
separado = []
for linha in texto:
    separado.append(linha.split())

ds = SupervisedDataSet(2, 1)

for i in range(len(separado)):
    a = float(separado[i][0])
    b = float(separado[i][1])
    c = float(separado[i][2])
    ds.addSample((a, b), (c))

nn = buildNetwork(2, 4, 1, bias=True)  #2 inputs, 4 hidden and one output

trainer = BackpropTrainer(nn, ds)
curva_treinamento = []
for i in xrange(2000):
    curva_treinamento.append(trainer.train())
    print 'Epoch: ', i, 'Error: ', curva_treinamento[i]
#plt.plot(curva_treinamento)
Code Example #42
def simulation(params):
    ntry = params["ntry"]
    maxEpk = params["maxEpok"]
    normalize_data = params["normalize"]
    scale_data = params["scale"]
    crossvalidation_pct = params["cross_validation_percentage"]
    learningRate = params["algorithm"]["params"]["learning_rate"]
    moment = params["algorithm"]["params"]["momentum"]

    dataXAll, dataYAll, cotationAll = buildDataset(
        params["dataset"]["src"], params["dataset"]["features"])
    nfeatures = len(dataXAll[0])
    # stat variable init
    winrateLst = list()
    moneyLst = list()
    cotationMeanLst = list()
    moneyBase = params["start_money"]
    moneyMin = moneyBase
    moneyMax = moneyBase
    pct_bet = params["percentage_bet"]
    # / stat variable init
    for n in range(0, ntry):
        ds = SupervisedDataSet(nfeatures, 3)
        dataX = list(dataXAll)
        dataY = list(dataYAll)
        cotations = list(cotationAll)

        # # crossvalidation data construction RANDOM PICK
        # datapX = list()
        # datapY = list()
        # cotationpHDA = list()
        # for i in range(0, int(crossvalidation_pct * len(dataX))):
        #     popi = random.randint(0, len(dataX) - 1)
        #     datapX.append(dataX[popi])
        #     datapY.append(dataY[popi])
        #     cotationpHDA.append(cotations[popi])
        #     dataX.pop(popi)
        #     dataY.pop(popi)
        #     cotations.pop(popi)
        # # / crossvalidation data construction

        # crossvalidation data construction PICK LAST
        datapX = list()
        datapY = list()
        cotationpHDA = list()
        extracti = int(len(dataX) - (crossvalidation_pct * len(dataX)))
        datapX = dataX[extracti:len(dataX)]
        datapY = dataY[extracti:len(dataY)]
        cotationpHDA = cotations[extracti:len(cotations)]
        dataX = dataX[0:extracti]
        dataY = dataY[0:extracti]
        cotations = cotations[0:extracti]
        # / crossvalidation data construction

        # scalarization && normalization -->
        # http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html &&
        # http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.Normalizer.html
        if scale_data == True:
            scalizer = preprocessing.StandardScaler().fit(dataX)
            dataX = scalizer.transform(dataX)
        if normalize_data == True:
            normalizer = preprocessing.Normalizer().fit(dataX)
            dataX = normalizer.transform(dataX)
        # / scalarization && normalization

        # training dataset construction
        for i in range(0, len(dataX)):
            ds.addSample(dataX[i], dataY[i])
        # / training dataset construction

        # nn && trainer construction
        net = buildNetwork(ds.indim, (ds.indim + ds.outdim) / 2,
                           ds.outdim,
                           bias=True,
                           outclass=SoftmaxLayer)  # building the n
        trainer = BackpropTrainer(net,
                                  ds,
                                  learningrate=learningRate,
                                  momentum=moment,
                                  verbose=False)  # building the trainer
        # / nn && trainer construction

        # training
        trainer.trainUntilConvergence(
            maxEpochs=maxEpk)  # Train, until convergence
        # for epoch in range(0,1000):
        #         trainer.train()
        # / training

        # cross validation
        win = 0
        money = moneyBase
        cotationMean = 0.
        for i in range(0, len(datapX)):
            unit_bet = pct_bet * money if pct_bet * money > 0.5 else 0
            toPredict = datapX[i]
            if scale_data == True:
                toPredict = scalizer.transform(toPredict)
            if normalize_data == True:
                toPredict = normalizer.transform(toPredict)[0]
            prediction = net.activate(toPredict)
            indexp, valuep = max(enumerate(prediction),
                                 key=operator.itemgetter(1))
            indexe, valuee = max(enumerate(datapY[i]),
                                 key=operator.itemgetter(1))
            money = money - unit_bet  # bet unit_bet on the prediction (money is lost)
            cotationMean += cotationpHDA[i][indexp]
            if indexp == indexe:
                win = win + 1
                money = money + (
                    unit_bet * cotationpHDA[i][indexp]
                )  # on good prediction, money increased by unit_bet * predicted issue cotation
            # in crossvalidation money min/max retrieve
            moneyMin = money if money < moneyMin else moneyMin
            moneyMax = money if money > moneyMax else moneyMax
            # / in crossvalidation money min/max retrieve
            # / cross validation
        cotationMean = cotationMean / float(len(datapX))
        cotationMeanLst.append(cotationMean)
        winrate = win / float(len(datapX))
        winrateLst.append(winrate)
        moneyLst.append(money)
    winrateFinal = sum(winrateLst) / float(ntry)
    winrateMin = min(winrateLst)
    winrateMax = max(winrateLst)
    winrateMedian = numpy.median(numpy.array(winrateLst))
    winrateStdDev = numpy.std(numpy.array(winrateLst))
    moneyFinal = sum(moneyLst) / float(ntry)
    moneyMinSeason = min(moneyLst)
    moneyMaxSeason = max(moneyLst)
    moneyMedian = numpy.median(numpy.array(moneyLst))
    moneyStdDev = numpy.std(numpy.array(moneyLst))
    cotationMeanFinal = sum(cotationMeanLst) / float(ntry)
    cotationMeanMin = min(cotationMeanLst)
    cotationMeanMax = max(cotationMeanLst)
    cotationMeanMedian = numpy.median(numpy.array(cotationMeanLst))
    cotationMeanStdDev = numpy.std(numpy.array(cotationMeanLst))
    results = {
        "win_percentage": {
            "min": winrateMin,
            "max": winrateMax,
            "mean": winrateFinal,
            "median": winrateMedian,
            "standard_deviation": winrateStdDev,
            "lst": winrateLst
        },
        "money_during_cross_validation": {
            "min": moneyMin,
            "max": moneyMax
        },
        "money_post_cross_validation": {
            "min": moneyMinSeason,
            "max": moneyMaxSeason,
            "mean": moneyFinal,
            "median": moneyMedian,
            "standard_deviation": moneyStdDev,
            "lst": moneyLst
        },
        "mean_cotation": {
            "min": cotationMeanMin,
            "max": cotationMeanMax,
            "mean": cotationMeanFinal,
            "median": cotationMeanMedian,
            "standard_deviation": cotationMeanStdDev,
            "lst": cotationMeanLst
        }
    }
    print results
    return results
Code Example #43
Num = np.zeros((NumOfData, LengthOfPicture, WidthOfPicture), dtype=np.uint8)
for i in range(NumOfData):
    img = ImaPreprocess(i)
    Num[i] = GetImageMatrix(img)

Label = np.zeros((NumOfData, 1, 10), dtype=np.uint8)
for i in range(NumOfData):
    trans = [0 for j in range(10)]
    trans[i % 10] = 1
    Label[i] = trans

# print Num[21], Label[21]
# print Num[38], Label[38]

net = buildNetwork(LengthOfPicture * WidthOfPicture, HiddenLayerNum, 10)
ds = SupervisedDataSet(LengthOfPicture * WidthOfPicture, 10)
for i in range(NumOfData):
    ds.addSample(Num[i].reshape((LengthOfPicture * WidthOfPicture, )),
                 Label[i])
trainer = BackpropTrainer(net, ds)


def CheckTraining():
    print 'Training:',
    ErrorTrain = 0
    for i in range(NumOfData):
        # print net.activate(Num[i].reshape((LengthOfPicture * WidthOfPicture, )))
        # print net.activate(Num[i].reshape((LengthOfPicture * WidthOfPicture, ))).argmax(),
        # if i % 10 == 9:
        #     print '\n',
        if (i % 10 != net.activate(Num[i].reshape(
Code Example #44
File: micex-vanga.py Project: rotanov/way-station
minPrice = []
closePrice = []
volumeSold = []

for row in reader:
    x.append(row[0] + row[1])
    openPrice.append(row[2])
    maxPrice.append(row[3])
    minPrice.append(row[4])
    closePrice.append(row[5])
    volumeSold.append(row[6])

windowSize = 9 * 4

net = buildNetwork(windowSize, 8, 4, 2, 1, hiddenclass=SigmoidLayer)
ds = SupervisedDataSet(windowSize, 1)
# , recurrent=True

n = windowSize * 8

for i in range(n):
    if i >= windowSize:
        ds.addSample(
            ([closePrice[i - (windowSize - j)] for j in range(windowSize)]),
            (closePrice[i]))

trainer = BackpropTrainer(
    net, ds)  #, learningrate=0.01, lrdecay=1.0, momentum=0.0, weightdecay=0.0)
# trainer = RPropMinusTrainer(net, dataset=ds)
for i in range(100):
    print('error: ', trainer.train(), '\n')
Code Example #45
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
import matplotlib.pyplot as plt
import sys
import pickle

train_interval = sys.argv[1].split(',')
train_begin = int(train_interval[0])
train_end = int(train_interval[1])

test_interval = sys.argv[2].split(',')
test_begin = int(test_interval[0])
test_end = int(test_interval[1])

ds = SupervisedDataSet(50, 1)

test_params = []
test_targets = []

with open("data/btc_dataset_sample.csv", "r") as btc_dataset:
    header = btc_dataset.readline()
    count = 0
    for line in btc_dataset.readlines():
        line = line.replace('\n', '')
        values = line.split(';')
        if count >= train_begin and count <= train_end:
            ds.addSample(values[:-1], values[-1])
        elif count >= test_begin and count <= test_end:
            test_params.append(values[:-1])
            test_targets.append(values[-1])
Code Example #46
File: finalproject.py Project: zyteka/ForgeryML
def main():
	#Prompt user for file and directory name
	io = IOclass()

	#open input file
	filePath = "input/" + io.filename
	inputFile = open(filePath, "r")

	#FOR DEBUGGING (print input file data) ----------------------------------------
	#for line in inputFile:
	#	print line
	#------------------------------------------------------------------------------

	if io.authorsWorks.lower() == 'none':  # string comparison needs ==, not 'is'
		print "no directory selected"
	else:
		movePassagesToDirectory(io)

	binDir = os.path.dirname(os.path.realpath(__import__("__main__").__file__))
	binDir += "/bin/"
	featureList = []
	classifierList = []
	for filename in os.listdir(binDir):
		if filename == "database":
			continue
		authorFile = open(binDir + filename, "r")
		for line in authorFile:
			authorVec = []
			for feature in line.split(","):
				authorVec.append(float(feature))
			featureList.append(authorVec)
			if filename == io.authorName + ".txt":
				classifierList.append(1)
			else:
				classifierList.append(0)

	inputVector = passageToFeature(filePath)


	print("Support Vector Machine:\n")
	from sklearn import svm

	print("	Creating...\n")
	train1 = svm.SVC(kernel='rbf')

	print("	Training...\n")
	train1.fit(featureList, classifierList)

	print("	Predicting...\n")

	result = train1.predict([inputVector])
	if result == 0:
		print("	Result: " + "Forgery")
	else:
		print("	Result: " + "Legit")
	score = train1.score(featureList, classifierList)

	print("		Mean accuracy of the SVM (training set): "+str(score)+'\n')

	print("Nueral Network:\n")
	from pybrain.tools.shortcuts import buildNetwork

	print("	Creating...\n")
	from pybrain.structure import TanhLayer
	net = buildNetwork(len(featureList[0]), len(featureList[0])+1, 1, hiddenclass=TanhLayer)

	from pybrain.datasets import SupervisedDataSet

	#size= amount of features per feature vector
	ds = SupervisedDataSet(len(featureList[0]), 1)

	for item, classifier in zip(featureList,classifierList):
		ds.addSample(tuple(item),(classifier,))

	
	print("	Training...\n")
	from pybrain.supervised.trainers import BackpropTrainer

	trainer = BackpropTrainer(net, ds)

	NUM_EPOCHS = 100

	for i in range(NUM_EPOCHS):
		error = trainer.train()
	print "Epoch: %d, Error: %7.4f" % (NUM_EPOCHS, error)

	print("	Predicting...\n")
	result = net.activate(inputVector)
	# the net was trained toward 1 for the genuine author, so the forgery
	# likelihood is the complement of its output
	print("	Result: " + str((1 - result[0]) * 100) + "% a forgery")
コード例 #47
0
import time
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised import BackpropTrainer
from pybrain.structure.modules import SigmoidLayer
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.tools.customxml.networkwriter import NetworkWriter

trainingDataset = SupervisedDataSet(21, 1)

f_TrainingTime = open('training_time.txt', 'a')
f_TestingTime = open('testing_time.txt', 'a')

f_Training = open ('ISCXTraining.txt', 'r')

trainingTime = time.time()

line = f_Training.readline()
for line in f_Training.xreadlines():
	allData = line.strip().split(' ')

	inputData = [float(x) for x in allData[2:]]
	outputData = int(allData[1]) - 1

	if outputData == 0:
		outputData = 0.1
	else:
		outputData = 0.9

	inData = tuple(inputData)
	outData = tuple([outputData])
	trainingDataset.addSample(inData, outData)  # assumed: the append is cut off in the snippet
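
The imports and timing variables above suggest that the truncated remainder built and trained a 21-input network; a hedged reconstruction (the hidden-layer size is an assumption):

net = buildNetwork(21, 10, 1, hiddenclass=SigmoidLayer)
trainer = BackpropTrainer(net, trainingDataset)
trainer.train()
f_TrainingTime.write(str(time.time() - trainingTime) + '\n')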
コード例 #48
0
File: main.py Project: chenzhikuo1/OCR-Python
saidas.append([1, 0, 0, 0])
saidas.append([1, 0, 0, 0])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas.append([1, 0, 0, 1])
saidas = np.array(saidas)  # np.array (not np.matrix) keeps each row a flat length-4 vector for addSample

parametros_entrada = SupervisedDataSet(22, 4)

for contador, entrada in enumerate(dados_entrada_normatizados):
    parametros_entrada.addSample(entrada, saidas[contador])

rede_neural = buildNetwork(22, 10, 4, bias=True, outputbias=True)

treinamento = BackpropTrainer(rede_neural, parametros_entrada, momentum=0.9)

treinamento.trainEpochs(1000)

objeto = WilsonPDI.wocr("../imagens_teste/um.jpg")

atributos_teste = []
atributos_teste.append(objeto.atributos())
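
Feeding the extracted test attributes through the trained network closes the loop (a sketch; it assumes atributos_teste was normalized the same way as the training data):

resultado = rede_neural.activate(atributos_teste[0])
print(resultado.argmax())  # index of the most active of the 4 output units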
コード例 #49
0
# AND
x_train = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_train = [[0], [0], [0], [1]]
# OR
x_train = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_train = [[0], [1], [1], [1]]
# XOR
x_train = [[0, 0], [0, 1], [1, 0], [1, 1]]
y_train = [[0], [1], [1], [0]]
# A NEURAL NETWORK IN 11 LINES OF PYTHON (https://iamtrask.github.io/2015/07/12/basic-python-network/)
x_train = [[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]]  # input dataset
y_train = [[0], [0], [1], [1]]  # output dataset

# Prepare a dataset
input_size = 3
target_size = 1
ds = SupervisedDataSet(input_size, target_size)
ds.setField('input', x_train)
ds.setField('target', y_train)

# Apparently Y needs to be of shape (n,1) as opposed to (n,) so first we reshape it:
#y_train = y_train.reshape(-1, 1)

# And to train a network:
hidden_size = 100  # arbitrarily chosen

net = buildNetwork(input_size, hidden_size, target_size, bias=True)
trainer = BackpropTrainer(net, ds)

trainer.trainUntilConvergence(verbose=True,
                              validationProportion=0.15,
                              maxEpochs=1000)
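
With training finished, the learned mapping can be checked against the target table (a minimal sketch using the names defined above):

for inp, tgt in zip(x_train, y_train):
    print(net.activate(inp), 'expected', tgt)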
コード例 #50
0
# create the connections between the three layers
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)

# add the connections to the neural network
fnn.addConnection(in_to_hidden)
fnn.addConnection(hidden_to_out)

# finalize the network so it is ready to use
fnn.sortModules()

from pybrain.supervised.trainers import BackpropTrainer

# define the dataset format: three-dimensional input, one-dimensional output
DS = SupervisedDataSet(3,1)



# add sample points to the dataset
# x1, x2 and x3 are the three input component vectors and y is the output vector; all have the same length

n = 10000
x1 = []
x2 = []
x3 = []
y = []
for i in range(n):
  x1.append(random.random())
  x2.append(random.random())
  x3.append(random.randrange(0, 5, 1))
コード例 #51
0
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 17 13:55:09 2017

@author: Administrator
"""

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import LSTMLayer,LinearLayer

net = buildNetwork(3,3,3, hiddenclass=LSTMLayer,outclass=LinearLayer, bias=True, recurrent=True)
dataSet = SupervisedDataSet(3, 3)
dataSet.addSample((0, 0, 0), (0, 0, 0))
dataSet.addSample((1, 1, 1), (0, 0, 0))
dataSet.addSample((1, 0, 0), (1, 0, 0))
dataSet.addSample((0, 1, 0), (0, 1, 0))
dataSet.addSample((0, 0, 1), (0, 0, 1))

trainer = BackpropTrainer(net, dataSet)
trained = False
acceptableError = 0.001

howmanytries = 0
# train until acceptable error reached
while (trained == False) and (howmanytries < 1000):
    error = trainer.train()
    if error < acceptableError :
        trained = True
    else:
        howmanytries += 1  # assumed: advance the retry counter (the snippet is cut off here)
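
Once the loop exits, the recurrent net can be probed on the training patterns (a minimal sketch):

net.reset()  # clear the recurrent state before probing
for sample, target in dataSet:
    print(net.activate(sample), target)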
コード例 #52
0
#!/usr/bin/python
# coding=utf-8

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

fnn = buildNetwork(4, 3, 2, 1, bias=True)

ds = SupervisedDataSet(4, 1)

fin = open("train")
train = []
train_label = []
for line in fin:
    line_list = line.strip().split(' ')
    if len(line_list) != 5:
        continue
    a = [float(line_list[x]) for x in range(4)]
    b = [float(line_list[4])]
    ds.addSample(a, b)

trainer = BackpropTrainer(fnn, ds)
trainer.train()

fin = open("test")
test = SupervisedDataSet(4, 1)
for line in fin:
    line_list = line.strip().split(' ')
    if len(line_list) != 5:
        continue
    a = [float(line_list[x]) for x in range(4)]
    b = [float(line_list[4])]
    test.addSample(a, b)  # assumed: mirrors the training-set construction above
コード例 #53
0
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import os
from PIL import Image
from pybrain import SigmoidLayer
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

NET = buildNetwork(768, 20, 1, bias=True, hiddenclass=SigmoidLayer)
DATASET = SupervisedDataSet(768, 1)


def get_histogram(img_path):
    """
    Loads the image contained in the specified path and returns its histogram.
    """
    print 'Processing file: %s' % img_path
    img = Image.open(img_path)
    histogram = img.histogram()
    if len(histogram) != 768:
        raise IOError("Image is not in RGB mode.")
    else:
        return histogram
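
A hedged sketch of how get_histogram, DATASET and NET would typically be wired together (the directory name and the positive label are assumptions, mirroring the face/noise pattern used in these examples):

for name in os.listdir('faces'):  # hypothetical directory of known-face images
    DATASET.addSample(get_histogram(os.path.join('faces', name)), (1,))
trainer = BackpropTrainer(NET, DATASET)
print 'error: %f' % trainer.train()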

コード例 #54
0
# -*- coding: utf-8 -*-
'''
@author:Zhukun Luo
Jiangxi university of finance and economics
'''
import numpy as np
import pybrain
from chapter8 import create_trainset
from chapter8.create_trainset import x_train, y_train, x, y, x_test, y_test
from pybrain.datasets import SupervisedDataSet
import chapter8
training = SupervisedDataSet(x.shape[1], y.shape[1])
for i in range(x_train.shape[0]):
    training.addSample(x_train[i], y_train[i])
testing = SupervisedDataSet(x.shape[1], y.shape[1])
for i in range(x_test.shape[0]):
    testing.addSample(x_test[i], y_test[i])
from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(x.shape[1], 100, y.shape[1], bias=True)
from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, training, learningrate=0.01, weightdecay=0.01)
trainer.trainEpochs(epochs=20)  # train for a fixed 20 epochs
predictions = trainer.testOnClassData(dataset=testing)
from sklearn.metrics import f1_score
# `average` is an assumption: sklearn requires it for multiclass targets
print("F-score: {0:.2f}".format(f1_score(y_test.argmax(axis=1), predictions,
                                         average='micro')))
コード例 #55
0
    print(" Temperature (in K) = " + str(current_temperature) +
          "\n atmospheric pressure (in hPa unit) = " + str(current_pressure) +
          "\n humidity (in percentage) = " + str(current_humidiy) +
          "\n description = " + str(weather_description))

else:
    print(" City Not Found ")

# AI - WIP
import simplejson as json
import math
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.tools.xml.networkwriter import NetworkWriter
from pybrain.tools.xml.networkreader import NetworkReader
ds = SupervisedDataSet(9, 3)

data = open("Z:\\sevendata.json", "r").read()
data = json.loads(data)
dataLen = len(data)  # renamed: the original shadowed the built-in len

for x in range(dataLen):
    try:
        ds.addSample(
            (data[x]["main"]["temp"], data[x]["main"]["humidity"],
             data[x]["main"]["pressure"], data[x - 1]["main"]["temp"],
             data[x - 1]["main"]["humidity"], data[x - 1]["main"]["pressure"],
             data[x - 2]["main"]["temp"], data[x - 2]["main"]["humidity"],
             data[x - 2]["main"]["pressure"]),
            (data[x + 24]["main"]["temp"], data[x + 48]["main"]["temp"],
             data[x + 72]["main"]["temp"]))
    except IndexError:
        # handler assumed (the snippet is cut off here): the +24/+48/+72
        # lookahead eventually runs past the end of the data
        break
コード例 #56
0
# imports assumed from the truncated top of this file
from numpy import hstack, array
from numpy.random import rand, randn
from matplotlib.pyplot import figure, plot
from pybrain.structure import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

N = 10
print 'make network', N, 'neurons'
XYnet = buildNetwork(N, N, N, hiddenclass=TanhLayer)

print 'build Excitator vector field dataset'


def vf(x, mu=0.4):
    dx1 = mu * (x[0] - x[0]**3 / 3.0 + x[1]) * 2
    dx2 = mu * (1.05 - x[0]) / 2
    dxr = -x[2:]
    return hstack((dx1, dx2, dxr))


figure()
XYds = SupervisedDataSet(N, N)
for i in range(10):
    x = (rand(N) - 0.5) * 4
    xs = [x.copy()]
    for j in range(500):
        dx = vf(x)
        XYds.addSample(x.copy(), dx.copy())
        x += 0.1 * (dx + randn(N) * 1.0)
        xs.append(x.copy())
    xs = array(xs)
    plot(xs[:, 0], xs[:, 1], 'k', alpha=0.1)

print 'using backprop to train on vector field'
XYtrainer = BackpropTrainer(XYnet, XYds)
i, err = 0, XYtrainer.train()
while err > 0.001 and i < 100:
    # loop body assumed (the snippet is cut off here): keep training and count iterations
    err = XYtrainer.train()
    i += 1
コード例 #57
0
import tkFileDialog  # Python 2 Tkinter dialog module; imports assumed from the truncated source
from pybrain.datasets import SupervisedDataSet

def load_dataset():
    open_filename = tkFileDialog.askopenfilename()
    global ds
    ds = SupervisedDataSet.loadFromFile(open_filename)
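
SupervisedDataSet also exposes the matching saveToFile, so a companion save handler is symmetric (a minimal sketch):

def save_dataset():
    save_filename = tkFileDialog.asksaveasfilename()
    ds.saveToFile(save_filename)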
コード例 #58
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(15, 1)

ds.addSample((1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1), 1)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0), 3)
ds.addSample((1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1), 4)
ds.addSample((1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1), 5)
ds.addSample((1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1), 6)
ds.addSample((1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1), 7)
ds.addSample((0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0), 8)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0), 9)
ds.addSample((1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0), 10)
ds.addSample((1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1), 11)
ds.addSample((0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0), 12)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0), 13)
ds.addSample((0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0), 14)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), 15)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0), 16)
ds.addSample((0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0), 17)
ds.addSample((1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1), 18)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1), 19)
ds.addSample((0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1), 20)
ds.addSample((0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0), 21)
ds.addSample((0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0), 22)
ds.addSample((1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1), 23)
ds.addSample((0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1), 24)
ds.addSample((0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0), 25)
ds.addSample((0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0), 26)
コード例 #59
0
# create mapper signals (inputs); loop header assumed by symmetry with the outputs loop below
for l_num in range(num_inputs):
    l_inputs[l_num] = l_map.add_input("/input/" + str(l_num + int(10)), 1, 'f',
                                      None, 0, 100.0, h)
    #	l_inputs[l_num]=l_map.add_input("/input/"+str(l_num),'f',h,None,0,100.0)
    l_map.poll(0)
    print("creating input", "/input/" + str(l_num + int(10)))
#	print ("creating input", "/input/"+str(l_num))

#create mapper signals (outputs)
for l_num in range(num_outputs):
    #	l_outputs[l_num]=l_map.add_output("/output/"+str(l_num+int(10)),'f',None,0,1)
    l_outputs[l_num] = l_map.add_output("/output/" + str(l_num), 1, 'f', None,
                                        0, 1)
    l_map.poll(0)
    #	print ("creating output","/output/"+str(l_num+int(10)))
    print("creating output", "/output/" + str(l_num))

#create network
#net = buildNetwork(num_inputs,num_hidden,num_outputs,bias=True, hiddenclass=GaussianLayer, outclass=GaussianLayer)
net = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True)
#create dataSet
ds = SupervisedDataSet(num_inputs, num_outputs)

#while (True):

ontimer()
#master.after(500, ontimer)
master.protocol("WM_DELETE_WINDOW", master.quit)
master.mainloop()
master.destroy()
del master
コード例 #60
0
# imports assumed from the truncated top of this file; Timer is a
# project-local timing context manager defined elsewhere
import numpy as np
from pybrain.structure import (FeedForwardNetwork, LinearLayer, SigmoidLayer,
                               BiasUnit, FullConnection)
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer


class ANN:
    def __init__(self):
        self.name = "ANN"

    def getParams(self):
        return self.in_to_hidden.params, self.hidden_to_out.params

    def create_network(self, nFeatures, hidden1Size=20, nClasses=1):
        # create network object
        self.ffn = FeedForwardNetwork()

        # create layer objects
        inLayer = LinearLayer(nFeatures, name="input")
        hiddenLayer = SigmoidLayer(hidden1Size, name="hidden1")
        #hiddenLayer2 = SigmoidLayer(hidden2Size, name="hidden2")
        outLayer = LinearLayer(nClasses, name="output")

        # add layers to feed forward network
        self.ffn.addInputModule(inLayer)
        self.ffn.addModule(hiddenLayer)
        #self.ffn.addModule(hiddenLayer2)
        self.ffn.addOutputModule(outLayer)

        # add bias unit to layers
        self.ffn.addModule(BiasUnit(name='bias'))

        # establish connections between layers
        self.in_to_hidden = FullConnection(inLayer, hiddenLayer)
        #hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer2)
        self.hidden_to_out = FullConnection(hiddenLayer, outLayer)

        # print "into hidden: {}".format(len(in_to_hidden.params))
        # print "into out: {}".format(len(hidden_to_out.params))

        # add connections to network
        self.ffn.addConnection(self.in_to_hidden)
        #self.ffn.addConnection(hidden_to_hidden)
        self.ffn.addConnection(self.hidden_to_out)

        # necessary: sorts the modules into the correct processing order
        self.ffn.sortModules()

        # dataset object
        self.train_ds = SupervisedDataSet(nFeatures, nClasses)
        self.validate_ds = SupervisedDataSet(nFeatures, nClasses)

    # train network
    def train(self, TrainX, TrainY, ValidateX, ValidateY):
        # clear old dataset
        self.train_ds.clear()
        self.validate_ds.clear()

        # add data to dataset object (ds)
        for i in range(TrainX.shape[0]):
            self.train_ds.addSample(TrainX[i], TrainY[i])

        for i in range(ValidateX.shape[0]):
            self.validate_ds.addSample(ValidateX[i], ValidateY[i])

        # randomize weights
        self.ffn.randomize()

        # Backprop trainer object
        self.trainer = BackpropTrainer(self.ffn,
                                       learningrate=.0775,
                                       momentum=.1)
        try:
            with Timer() as t:
                self.train_errors, self.val_errors \
                    = self.trainer.trainUntilConvergence(trainingData=self.train_ds, \
                                                         validationData=self.validate_ds, \
                                                         maxEpochs=500, \
                                                         continueEpochs=10)

            return self.train_errors, self.val_errors
        except Exception:
            print "Error occurred while training model in ANN."

        #finally:
        #    print("ANN.py - Time to trainUntilConvergence: {:.03f} sec.".format(t.interval))

        #return 'ANN'

    # predict depenent variable for dataset
    def predict(self, data):
        # if only making a prediction for a single sample
        if len(data.shape) == 1:
            return self.ffn.activate(data)
        else:
            outputs = np.zeros(data.shape[0])
            for i in range(data.shape[0]):
                outputs[i] = self.ffn.activate(data[i])
            return outputs
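
A usage sketch for the class above (the feature count, sample counts, and 60/40 split are assumptions; it also presumes the project-local Timer helper is importable):

X = np.random.rand(100, 10)
Y = (X.sum(axis=1) > 5.0).astype(float).reshape(-1, 1)
ann = ANN()
ann.create_network(nFeatures=10)
ann.train(X[:60], Y[:60], X[60:], Y[60:])
print ann.predict(X[:5])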