Ejemplo n.º 1
0
	def computeModel(self, path, user):
		"""Train an LSTM regression network on one user's digit samples and save it.

		:param path: unused here -- TODO confirm whether callers rely on it
		:param user: object exposing .key, used both as the regression target
		             and to name the saved network file
		:return: the trained pybrain network
		"""
		# Create a supervised dataset for training.
		trndata = SupervisedDataSet(24, 1)
		tstdata = SupervisedDataSet(24, 1)
		
		# Fill the datasets: variations 0-6 of each digit are used for
		# training, variations 7-9 are held out for testing.
		for number in range(0,10):
			for variation in range(0,7):
				# Pass all the features as inputs.
				trndata.addSample(self.getSample(user, number, variation),(user.key,))
				
			for variation in range(7,10):
				# Pass all the features as inputs.
				tstdata.addSample(self.getSample(user, number, variation),(user.key,))
				
		# Build the LSTM: 24 inputs, 50 recurrent hidden units, 1 output.
		n = buildNetwork(24, 50, 1, hiddenclass=LSTMLayer, recurrent=True, bias=True)

		# define a training method
		trainer = BackpropTrainer(n, dataset = trndata, momentum=0.99, learningrate=0.00002)

		# carry out the training, then report held-out error
		trainer.trainOnDataset(trndata, 2000)
		valueA = trainer.testOnData(tstdata)
		print '\tMSE -> {0:.2f}'.format(valueA)
		# NOTE(review): the backslashes in this Windows-style path are literal
		# in Python 2 but '\N' is a syntax error in Python 3 -- confirm target.
		self.saveModel(n, '.\NeuralNets\SavedNet_%d' %(user.key))
		
		return n
Ejemplo n.º 2
0
  def train(self, params):
    """
    Fit the TDNN on the buffered history of observations.
    :param params: dict with keys 'encoding_num', 'num_lags',
        'learning_window', 'verbosity' and 'num_epochs'
    :return: None
    """
    enc_len = params['encoding_num']
    num_lags = params['num_lags']

    dataset = SupervisedDataSet(enc_len * num_lags, enc_len)
    recent = self.window(self.history, params['learning_window'])

    # One sample per position that has a full complement of lagged inputs.
    sample_idx = num_lags
    while sample_idx < len(recent):
      target_row = numpy.zeros((1, enc_len))
      target_row[0, :] = self.encoder.encode(recent[sample_idx])

      # Features are the encodings of the previous `num_lags` observations,
      # most recent first.
      feature_row = numpy.zeros((1, enc_len * num_lags))
      for lag in xrange(num_lags):
        lo = lag * enc_len
        feature_row[0, lo:lo + enc_len] = self.encoder.encode(
          recent[sample_idx - (lag + 1)])

      dataset.addSample(feature_row, target_row)
      sample_idx += 1

    trainer = BackpropTrainer(self.net,
                              dataset=dataset,
                              verbose=params['verbosity'] > 0)

    # Nothing to learn from a history of zero or one element.
    if len(recent) > 1:
      trainer.trainEpochs(params['num_epochs'])
Ejemplo n.º 3
0
def getDS(il,ol,trainData):
	"""Build a pybrain SupervisedDataSet from a 2-D sample matrix.

	:param il: number of input columns (features)
	:param ol: declared number of output columns for the dataset header
	:param trainData: array of shape (rows, >= il+1); columns [0:il] are the
		features and column il is the target value
	:return: the populated SupervisedDataSet
	"""
	DS = SupervisedDataSet(il,ol)
	# Add every sample row to the dataset.
	r = trainData.shape[0]
	# Bug fix: the original looped over xrange(0, r-1) and silently
	# dropped the last training row.
	for i in xrange(r):
		DS.addSample(trainData[i,:il],trainData[i,il])
	return DS
Ejemplo n.º 4
0
class Brain:
	"""Wraps a pybrain feed-forward classifier over 12-feature image samples."""

	def __init__(self, hiddenNodes = 30):
		# construct neural network: 12 inputs, `hiddenNodes` hidden, 1 output
		self.myClassifierNet = buildNetwork(12, hiddenNodes, 1, bias=True, hiddenclass=TanhLayer)
		# set up dataset and a trainer bound to it
		self.myDataset = SupervisedDataSet(12, 1)
		self.myClassifierTrainer = BackpropTrainer(self.myClassifierNet, self.myDataset)

	def addSampleImageFromFile(self, imageFile, groupId):
		"adds a data sample from an image file, including needed processing"
		myImage = Image.open(imageFile)
		self.myDataset.addSample(twelveToneParallel(myImage), (groupId,))

	def train(self):
		# trainUntilConvergence() can take forever (possibly literally in the
		# pathological case), so run a fixed number of epochs instead.  This
		# may yield an inferior network, but in practice works fine.
		for i in range(0, 15):
			self.myClassifierTrainer.train()

	def save(self, saveFileName="recognizernet.brain"):
		"""Pickle the trained network to disk."""
		# Bug fix: pickle requires a binary handle ('wb'); text mode corrupts
		# the stream on Windows.  `with` also guarantees the file is closed.
		with open(saveFileName, 'wb') as saveFile:
			pickle.dump(self.myClassifierNet, saveFile)

	def load(self, saveFileName="recognizernet.brain"):
		"""Restore a previously saved network from disk."""
		# Bug fix: the original assigned the unpickled network to a LOCAL
		# variable, so loading silently had no effect on the instance.
		with open(saveFileName, 'rb') as saveFile:
			self.myClassifierNet = pickle.load(saveFile)

	def classify(self, fileName):
		"""Return 0 or 1 by thresholding the network output at 0.5."""
		myImage = Image.open(fileName)
		if self.myClassifierNet.activate(twelveToneParallel(myImage)) < 0.5:
			return 0
		else:
			return 1
Ejemplo n.º 5
0
def loadDataSet(ds_file):
  """Load a CSV digit dataset into a pybrain SupervisedDataSet.

  Each line holds 400 float features followed by an integer label in 1..10
  (10 encodes the digit 0).  As a side effect the raw features are appended
  to the global X and the raw labels to the global Y.

  :param ds_file: path to the comma-separated data file
  :return: SupervisedDataSet with 400 inputs and a 10-way one-hot target
  """
  global X, Y
  ds = SupervisedDataSet(400, 10)
  # Cleanup: the original declared unused locals (BB, aaa) and carried
  # several blocks of dead commented-out code; both removed.
  with open(ds_file,"rb") as f:
    for line in f.readlines():
      l = [float(a) for a in line.strip().split(',')]
      A = l[:-1]
      X.append(A)
      B = int(l[-1])
      Y.append(B)
      # One-hot encode the label; class 10 means digit 0 -> index 0.
      C = [1 if i == B or (i == 0 and B == 10) else 0 for i in range(10)]
      ds.addSample(tuple(A), tuple(C))
  return ds
Ejemplo n.º 6
0
def pybrain_high():
	"""Train a BP network on stock rows and predict the held-out 'CIHKY' set.

	Pulls up to 100 non-CIHKY rows as training data and all CIHKY rows as the
	test set, trains a 5-3-1 network for 100 epochs, prints de-normalised
	predictions, and returns the raw network outputs on the test set.
	"""
	back=[]
	alldate=New_stock.objects.filter().exclude(name='CIHKY')[0:100]
	wholelen=len(alldate)
	test=New_stock.objects.filter(name__contains="CIHKY")
	testlen=len(test)
	# test dataset (targets are dummy zeros -- only the inputs are used below)
	testdata= SupervisedDataSet(5, 1)
	testwhole=newalldate(test,testlen)
	for i in testwhole:
		testdata.addSample((i[0],i[2],i[3],i[4],i[5]), (0,))	
	# training dataset: field [1] is the regression target
	data= SupervisedDataSet(5, 1)
	wholedate=newalldate(alldate,wholelen)
	for i in wholedate:
		data.addSample((i[0],i[2],i[3],i[4],i[5]), (i[1]))	
	# build the BP network
	# NOTE(review): SoftmaxLayer over a single output always emits 1.0, which
	# looks wrong for a regression target -- confirm the intended outclass.
	net = buildNetwork(5, 3, 1,bias=True,hiddenclass=TanhLayer, outclass=SoftmaxLayer)
	
	trainer = BackpropTrainer(net,data)
	trainer.trainEpochs(epochs=100)
	# one more training pass, then run the network on the test set
	trainer.train()
	print 'ok'
	out=net.activateOnDataset(testdata)
	# collect the actual 'high' values to de-normalise against
	for j in  test:
                back.append((j.high))
	print back
	print out
	backout=backnormal(back,out)
	print 'okokokoko'
	print backout  # de-normalised predictions for the test set
	return out 
Ejemplo n.º 7
0
    def learn(self):
        """Run one NFQ learning pass: convert the collected reinforcement
        sequences into a supervised Q-target dataset and batch-train the
        network with RProp-."""
        # convert reinforcement dataset to NFQ supervised dataset
        supervised = SupervisedDataSet(self.module.network.indim, 1)

        for seq in self.dataset:
            lastexperience = None
            for state, action, reward in seq:
                if not lastexperience:
                    # delay each experience in sequence by one
                    lastexperience = (state, action, reward)
                    continue

                # use experience from last timestep to do Q update
                (state_, action_, reward_) = lastexperience

                Q = self.module.getValue(state_, action_[0])

                # network input: state concatenated with one-hot action encoding
                inp = r_[state_, one_to_n(action_[0], self.module.numActions)]
                # Q-learning target with fixed 0.5 step size toward
                # r + gamma * max_a' Q(s', a'); `state` here is the successor
                tgt = Q + 0.5*(reward_ + self.gamma * max(self.module.getActionValues(state)) - Q)
                supervised.addSample(inp, tgt)

                # update last experience with current one
                lastexperience = (state, action, reward)

        # train module with backprop/rprop on dataset
        trainer = RPropMinusTrainer(self.module.network, dataset=supervised, batchlearning=True, verbose=False)
        trainer.trainUntilConvergence(maxEpochs=self.maxEpochs)
Ejemplo n.º 8
0
class NeuralKinect():
    """Letter-pose classifier: 60 skeleton-point inputs -> 5-bit letter code."""

    def __init__(self):
        # Softmax layer -> great for classification networks
        #self.neuralNet = buildNetwork(60, 60, 5, outclass=SoftmaxLayer)
        #self.neuralNet = buildNetwork(60, 60, 5, hiddenclass=TanhLayer)
        #self.neuralNet = buildNetwork(60, 60, 5, bias=True)
        self.neuralNet = buildNetwork(60, 60, 5)
        self.dataSet = SupervisedDataSet(60, 5)

    def trainBackProp(self):
        """Train for EPOCHS epochs, timing the run and printing a final error."""
        trainer = BackpropTrainer(self.neuralNet, self.dataSet)
        start = time.time()
        trainer.trainEpochs(EPOCHS)
        end = time.time()
        print("Training time -> " + repr(end-start))
        print(repr(trainer.train()))

    def loadDataSet(self):
        """Load training samples from TrainData/*.csv.

        Each file's header encodes the letter and its 5-digit binary target;
        every 20 data rows are flattened into one 60-value sample.
        """
        points = []
        for csvFile in glob.iglob("TrainData/*.csv"):
            with open(csvFile, 'rt') as letterSet:
                reader = csv.reader(letterSet)
                # Header looks like "['X 10010']": char 2 is the letter,
                # chars 4-8 are the binary target code.  (reader.next() is
                # Python 2 csv API.)
                header = str(reader.next())
                letter = header[2:3]
                targetStr = header[4:9]
                print("Processing Dataset for letter -> " + letter)
                target = []
                for digit in targetStr:
                    target.append(digit)
                rows = 1
                for row in reader:              
                    for col in row:
                        points.append(col)
                    # Group every 20 rows into a single 60-value sample.
                    if rows % 20 == 0:
                        self.dataSet.addSample(points, target)
                        points = []
                    rows += 1
                    
    def processResults(self, output):
        """Threshold the 5 network outputs into bits and print the letter."""
        result = ""
        for digit in output:
            if digit > 0.5:
                result += "1"
            else:
                result += "0"
        # Interpret the bit string as a 1-based alphabet index (A == 1).
        print("Network result -> " + chr(64+int(result,2)))
                    
    def testNetwork(self):
        """Classify every 20-row block from TestData/*.csv and print results."""
        points = []
        for csvFile in glob.iglob("TestData/*.csv"):
            with open(csvFile, 'rt') as testPose:
                reader = csv.reader(testPose)
                rows = 1
                for row in reader:
                    for col in row:
                        points.append(col)
                    if rows % 20 == 0:
                        self.processResults(self.neuralNet.activate(points))
                        points = []
                    rows += 1
Ejemplo n.º 9
0
def neural_network(data, target, network):
    """10-fold cross-validated training of a small regression network.

    Trains a 7-hidden-unit network on each fold, records the per-fold RMSE,
    plots RMSE against fold number and prints the mean RMSE.  The `network`
    argument is accepted but unused, matching the original interface.
    """
    dataset = SupervisedDataSet(len(data[0]), 1)
    model = buildNetwork(len(data[0]), 7, 1, bias = True)
    folds = KFold(len(target), 10, shuffle = True)
    fold_errors = []
    for train_idx, test_idx in folds:
        train_X, test_X = data[train_idx], data[test_idx]
        train_y, test_y = target[train_idx], target[test_idx]
        # Fill the (shared) dataset with this fold's training samples.
        for features, label in zip(train_X, train_y):
            dataset.addSample(features, label)
        trainer = BackpropTrainer(model, dataset, verbose = True)
        trainer.trainUntilConvergence(maxEpochs = 10)
        # Evaluate on the held-out fold.
        predictions = [model.activate(row) for row in test_X]
        fold_errors.append(sqrt(np.mean((predictions - test_y) ** 2)))
        # Reset the dataset for the next fold.
        dataset.clear()
    plt.figure()
    plt.plot(range(1, 11), fold_errors)
    plt.xlabel('cross-validation time')
    plt.ylabel('RMSE')
    plt.show()
    print(np.mean(fold_errors))
def get_dataset_for_pybrain_regression(X,y):
	"""Convert feature/target pandas objects into a pybrain dataset.

	:param X: DataFrame-like with 250 feature columns (read via .values)
	:param y: Series-like of scalar targets (read via .values)
	:return: SupervisedDataSet(250, 1) with one sample per row
	"""
	ds = SupervisedDataSet(250,1)
	# Idiom fix: the original reused the parameter names X/y as loop
	# variables, shadowing the arguments -- error-prone if the function grows.
	tuples_X = [tuple(map(float,tuple(row))) for row in X.values]
	tuples_y = [tuple(map(float,(val,))) for val in y.values]
	for features,label in zip(tuples_X,tuples_y):
		ds.addSample(features,label)
	return ds
Ejemplo n.º 11
0
    def learn(self,dataset):
        """
            Train the configured network on a dataset.

            Input:
            dataset     - dict mapping input sequences to expected outputs

            Raises:
            NeuralBrainException - if the network is unconfigured or the
                                   dataset is empty

            Returns:
            Nothing
        """
        from pybrain.supervised.trainers import BackpropTrainer
        from pybrain.datasets import SupervisedDataSet
        from neuraltrainer import NeuralTrainer

        # Idiom fixes: identity check for None, truthiness for "empty dict".
        if self._net is None: raise NeuralBrainException("Brain is not configured!")
        if not dataset: raise NeuralBrainException("Dataset for learning is empty.")

        data = SupervisedDataSet(self._input,self._output)
        for input,output in dataset.items():
            input = self._normalize(input,self._input)
            output = self._normalize(output,self._output)
            # Each sample is added twice to weight learning more heavily.
            data.addSample(input,output)
            data.addSample(input,output)

        trainer = NeuralTrainer(self._net, data)
        trainer.simpleTrain()
def get_train_samples(input_num,output_num):
    '''
    Read digit images from the new_samples folder and build a training set
    sized by the given input/output counts; each original image additionally
    spawns 100 randomly-noised variants.
    '''
    print 'getsample start.'
    sam_path='./new_samples'
    samples = SupervisedDataSet(input_num,output_num)
    nlist = os.listdir(sam_path)
    # Images are resized to t x t so that t*t == input_num.
    t=int(np.sqrt(input_num))
    for n in nlist:
        file = os.path.join(sam_path,n)
        im = Image.open(file)
        im = im.convert('L')
        im = im.resize((t,t),Image.BILINEAR)
        buf = np.array(im).reshape(input_num,1)
        # Binarise: dark pixels (value < 200) become True.
        buf = buf<200
        buf = tuple(buf)
        # The file name stem is the class label, e.g. "3.png" -> class 3.
        buf1=int(n.split('.')[0])
        buf2=range(output_num)
        for i in range(len(buf2)):
            buf2[i] = 0
        buf2[buf1]=1
        buf2 = tuple(buf2)
        samples.addSample(buf,buf2)
        # 100 noisy copies: randomise ~1/20 of the pixels each time
        # (len(buf)/20 relies on Python 2 integer division).
        for i in range(100):
            buf3 = list(buf)
            for j in range(len(buf)/20):
                buf3[np.random.randint(len(buf))] = bool(np.random.randint(2))
            samples.addSample(tuple(buf3),buf2)
    return samples 
Ejemplo n.º 13
0
def convertDataNeuralNetwork(x, y):
	"""Pack parallel feature/target arrays into a SupervisedDataSet.

	:param x: 2-D array of features, shape (n_samples, n_features)
	:param y: iterable of target values, one per row of x
	:return: SupervisedDataSet(x.shape[1], 1)
	"""
	data = SupervisedDataSet(x.shape[1], 1)
	# Bug fix: the original body line mixed spaces and tabs under a
	# tab-indented loop, raising an IndentationError/TabError at import time.
	for xIns, yIns in zip(x, y):
		data.addSample(xIns, yIns)
	return data

def NN(xTrain, yTrain, xTest, yTest):
	"""Build, train and evaluate a sigmoid-hidden feed-forward network.

	:return: percentError of testOnClassData on the test set, divided by 100
	NOTE(review): testOnClassData is a classification helper; despite the
	variable name `rmse`, this is not a root-mean-square error -- confirm.
	"""
	trainData = convertDataNeuralNetwork(xTrain, yTrain)
	testData = convertDataNeuralNetwork(xTest, yTest)
	fnn = FeedForwardNetwork()
	# sigmoid input layer sized to the data, 5 hidden sigmoid units, linear output
	inLayer = SigmoidLayer(trainData.indim)
	hiddenLayer = SigmoidLayer(5)
	outLayer = LinearLayer(trainData.outdim)
	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer)
	fnn.addOutputModule(outLayer)
	in_to_hidden = FullConnection(inLayer, hiddenLayer)
	hidden_to_out = FullConnection(hiddenLayer, outLayer)
	fnn.addConnection(in_to_hidden)
	fnn.addConnection(hidden_to_out)
	# finalize the network topology before training
	fnn.sortModules()
	trainer = BackpropTrainer(fnn, dataset = trainData, momentum = 0.1, verbose = True, weightdecay = 0.01)

	# 10 rounds of 500 epochs each
	for i in xrange(10):
	    trainer.trainEpochs(500)
	    
	rmse = percentError(trainer.testOnClassData(dataset = testData), yTest)
	return rmse/100

def main():
	# xTrain/yTrain/xTest/yTest are expected to be module-level globals --
	# TODO confirm where they are defined.
	rmse = NN(xTrain, yTrain, xTest, yTest)
	print rmse

if __name__=="__main__":
	main()
Ejemplo n.º 14
0
    def run_try(self, rand_chance=0, rand_count=0, rand_count_ref=0, render=False):
        """Run one episode, collecting (observation, action) training samples.

        :param rand_chance: per-step probability of flipping the chosen action
        :param rand_count: number of pre-selected steps whose action is flipped
        :param rand_count_ref: exclusive upper bound for those step indexes
        :param render: render the environment while running
        :return: (index of the last timestep reached, collected dataset)
        """
        ds = SupervisedDataSet(env_size, 1)
        observation = env.reset()

        # Pick `rand_count` distinct step indexes whose action will be flipped.
        random_indexes = []
        while len(random_indexes) < rand_count:
            random_index = math.floor(random() * rand_count_ref)
            if random_index not in random_indexes:
                random_indexes.append(random_index)

        for t in range(max_frames):
            if render:
                env.render()

            # Threshold the network output into a binary action.
            action = 0 if net.activate(observation)[0] < 0 else 1

            if t in random_indexes or random() < rand_chance:
                # Bug fix: the original computed (action + 1) % 1, which is
                # always 0; % 2 actually flips the binary action as intended.
                action = (action + 1) % 2

            ds.addSample(observation, (action,))
            observation, reward, done, info = env.step(action)

            if done:
                print("Episode finished after {} timesteps".format(t + 1))
                break

        # Surviving the full episode counts as a pass; replay it rendered.
        if t == max_frames - 1:
            print("Passed!!")
            self.run_try(render=True)

        return t, ds
Ejemplo n.º 15
0
def trainDataSet():
    cases = Case.objects.exclude(geocode__isnull=True, geocode__grid=-1)

    print "Data Representation"
    ds = SupervisedDataSet(5245, 5245)
    for w in xrange(0,52):
        print "Start week w",
        dataset_input = [0 for i in xrange(0,5245)]
        dataset_output = [0 for i in xrange(0,5245)]
        for i in xrange(0,5245):
            dataset_input[i] = cases.filter(geocode__grid=i, morbidity__week=w).count()
            dataset_output[i] = 1 if (cases.filter(geocode__grid=i, morbidity__week=w+1).count() > 0 or cases.filter(geocode__grid=i, morbidity__week=w+2).count() > 0) else 0
        ds.addSample( (dataset_input), (dataset_output))
        print " - done week w"
    # tstdata, trndata = ds.splitWithProportion(0.25)
    print "Train"
    net = buildNetwork( 5245, 1000, 5245, bias=True)
    trainer = BackpropTrainer(net, ds, learningrate=0.1, momentum=0.99)

    terrors = trainer.trainUntilConvergence(verbose = None, validationProportion = 0.33, maxEpochs = 1000, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("data.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 5245):
            print math.floor(output[q]), math.floor(expectedOutput[q])
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = count/5245
        fo.write("{0} ::  {1}".format(count, m));
    def initializeNetwork(self):
        """Load the bundled can/stain/dirt sample images, train a 4-3-3
        softmax classifier on their features, and store it on self.net."""
        can1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can1.png'), self.encodingDict["can"])
        can2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can2.png'), self.encodingDict["can"])
        can3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/can3.png'), self.encodingDict["can"])
        stain1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain1.png'), self.encodingDict["stain"])
        stain2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain2.png'), self.encodingDict["stain"])
        stain3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/stain3.png'), self.encodingDict["stain"])
        dirt1 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt1.png'), self.encodingDict["dirt"])
        dirt2 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt2.png'), self.encodingDict["dirt"])
        dirt3 = NNTrainData.NNTrainData(cv2.imread('NNTrain/dirt3.png'), self.encodingDict["dirt"])

        self.trainData.append(can1)
        self.trainData.append(can2)
        self.trainData.append(can3)
        self.trainData.append(stain1)
        self.trainData.append(stain2)
        self.trainData.append(stain3)
        self.trainData.append(dirt1)
        self.trainData.append(dirt2)
        self.trainData.append(dirt3)

        for x in self.trainData:
            x.prepareTrainData()

        self.net = buildNetwork(4, 3, 3, hiddenclass=TanhLayer, outclass=SoftmaxLayer)
        ds = SupervisedDataSet(4, 3)

        # features: contour count and BGR color components, scaled down
        for x in self.trainData:
            ds.addSample((x.contours/100.0, x.color[0]/1000.0, x.color[1]/1000.0, x.color[2]/1000.0), x.output)

        # trainer is built without a dataset; trainOnDataset supplies it below
        trainer = BackpropTrainer(self.net, momentum=0.1, verbose=True, weightdecay=0.01)
        trainer.trainOnDataset(ds, 1000)
        trainer.testOnData(verbose=True)
        print "\nSiec nauczona\n"
Ejemplo n.º 17
0
def run_data():
    with open('new_data2.txt') as data_file:
        data = json.load(data_file)
    ds = SupervisedDataSet(1316, 1316)
    for i in xrange(0, 51):
        print "Adding {}th data sample".format(i),
        input = tuple(data[str(i)]['input'])
        output = tuple(data[str(i)]['output'])        
        # print len(input), len(output)
        ds.addSample( input, output)
        print ":: Done"

    print "Train"
    net = buildNetwork( 1316, 100, 1316, bias=True, )
    trainer = BackpropTrainer(net, ds)

    terrors = trainer.trainUntilConvergence(verbose = True, validationProportion = 0.33, maxEpochs = 20, continueEpochs = 10 )
    # print terrors[0][-1],terrors[1][-1]
    fo = open("results2.txt", "w")
    for input, expectedOutput in ds:
        output = net.activate(input)
        count = 0
        for q in xrange(0, 1316):
            print output[q], expectedOutput[q]
            if math.floor(output[q]) == math.floor(expectedOutput[q]):
                count+=1    
        m = float(count)/1316.00
        print "{0} ::  {1}".format(count, m)
        fo.write("{0} ::  {1}\n".format(count, m))
Ejemplo n.º 18
0
def createBetterSupervisedDataSet(input_file):
	"""Build a SupervisedDataSet of answer feature vectors, caching the parsed
	answers in a pickle so later runs skip re-parsing.

	:param input_file: raw training file parsed by loadAnswersByQuestion
	:return: (dataset, answers_by_question dict)
	"""
	print "Creating a BETTER supervised dataset from", input_file

	ds = SupervisedDataSet(nFeatures, 1)
	answers_by_question = {}

	# Prefer the cached pickle; fall back to parsing the input file.
	try:
		with open(training_data_pickle_name, 'rb') as p:
			print "Loading from pickle"
			answers_by_question = pickle.load(p)
			print "Load successful"
			print "Size of answers_by_question:", len(answers_by_question.keys())

	except IOError:
		answers_by_question = loadAnswersByQuestion(input_file)

		print "Saving to a pickle..."
		with open(training_data_pickle_name, 'wb') as p:
			pickle.dump(answers_by_question, p)
		print "Saved to", training_data_pickle_name

	# Load each answer's feature vector and target into the dataset;
	# the 'info' key holds question metadata, not an answer.
	for qid in answers_by_question:
		for aid in answers_by_question[qid]:
			if aid != 'info':
				ds.addSample( tuple(answers_by_question[qid][aid]['data']), (answers_by_question[qid][aid]['target'], ) )

	return ds, answers_by_question
Ejemplo n.º 19
0
def createNet():
	"""Create and seed the intial neural network"""
	# CONSTANTS: three (x, y) enemy positions in, three ally positions out
	nn_input_dim = 6 #[x_enemy1, y_enemy1, x_enemy2, y_enemy2, x_enemy3, y_enemy3]
	nn_output_dim = 6 #[x_ally1, y_ally1, x_ally2, y_ally2, x_ally3, y_ally3]

	allyTrainingPos, enemyTrainingPos = runExperiments.makeTrainingDataset()

	ds = SupervisedDataSet(nn_input_dim, nn_output_dim)

	# normalize each position list and flatten [(x, y), ...] into [x, y, ...]
	for i in range(0, len(allyTrainingPos)):
		x = normalize(enemyTrainingPos[i])
		y = normalize(allyTrainingPos[i])
		x = [val for pair in x for val in pair]
		y = [val for pair in y for val in pair]
		ds.addSample(x, y)

	# dump the dataset for inspection
	for inpt, target in ds:
		print inpt, target

	net = buildNetwork(nn_input_dim, 30, nn_output_dim, bias=True, hiddenclass=TanhLayer)
	trainer = BackpropTrainer(net, ds)
	trainer.trainUntilConvergence()
	# persist the trained network, then sanity-check it on the test set
	NetworkWriter.writeToFile(net, "net.xml")
	enemyTestPos = runExperiments.makeTestDataset()
	print(net.activate([val for pair in normalize(enemyTestPos) for val in pair]))
	return ds
Ejemplo n.º 20
0
def build_dataset():
    """Return the four-sample XOR truth table as a SupervisedDataSet(2, 1)."""
    xor_table = [
        ((0, 0), (0,)),
        ((0, 1), (1,)),
        ((1, 0), (1,)),
        ((1, 1), (0,)),
    ]
    ds = SupervisedDataSet(2, 1)
    for inputs, target in xor_table:
        ds.addSample(inputs, target)
    return ds
Ejemplo n.º 21
0
def getModel(inputSize,hiddenSize1,hiddenSize2,trainData,target):
	"""Build and train a two-hidden-layer regression network.

	:param inputSize: number of input features
	:param hiddenSize1: units in the first sigmoid hidden layer
	:param hiddenSize2: units in the second sigmoid hidden layer
	:param trainData: training feature matrix (standard-scaled internally)
	:param target: sequence of scalar regression targets, one per row
	:return: the trained FeedForwardNetwork
	"""
	fnn = FeedForwardNetwork()
	inLayer = LinearLayer(inputSize,name = 'inLayer')
	hiddenLayer0 = SigmoidLayer(hiddenSize1,name='hiddenLayer0')
	hiddenLayer1 = SigmoidLayer(hiddenSize2,name='hiddenLayer1')
	outLayer = LinearLayer(1,name = 'outLayer')

	fnn.addInputModule(inLayer)
	fnn.addModule(hiddenLayer0)
	fnn.addModule(hiddenLayer1)
	fnn.addOutputModule(outLayer)

	inToHidden0 = FullConnection(inLayer,hiddenLayer0)
	hidden0ToHidden1 = FullConnection(hiddenLayer0,hiddenLayer1)
	hidden1ToHiddenOutput = FullConnection(hiddenLayer1,outLayer)

	fnn.addConnection(inToHidden0)
	fnn.addConnection(hidden0ToHidden1)
	fnn.addConnection(hidden1ToHiddenOutput)

	# Topology must be finalized before training.
	fnn.sortModules()
	Ds = SupervisedDataSet(inputSize,1)
	# Standardize features before feeding them to the network.
	scaler = preprocessing.StandardScaler().fit(trainData)
	x = scaler.transform(trainData)
	# Idiom fix: iterate rows and targets together instead of indexing
	# through range(len(target)).
	for row, label in zip(x, target):
		Ds.addSample(row,[label])
	trainer = BackpropTrainer(fnn,Ds,learningrate=0.01,verbose=False)
	trainer.trainUntilConvergence(maxEpochs=1000)
	return fnn
Ejemplo n.º 22
0
    def _build_dataset(self, data):
        """Split a Dataframe into shuffled pybrain train/validation datasets.

        Rows are permuted with a RNG seeded by self.seed; the first
        self.train_fraction of them become the training dataset and the
        remainder the validation dataset.

        data: pandas Dataframe containing both feature and target columns.

        Returns: (training SupervisedDataSet, validation SupervisedDataSet)
        """
        np.random.seed(self.seed)
        order = np.random.permutation(np.arange(len(data)))
        cutoff = int(self.train_fraction * len(data))
        features = data[self.features]
        targets = data[self.targets]
        ds_train = SupervisedDataSet(self.n_feature, self.n_target)
        ds_valid = SupervisedDataSet(self.n_feature, self.n_target)
        # First `cutoff` shuffled rows feed training, the rest validation.
        for dataset, indices in ((ds_train, order[:cutoff]),
                                 (ds_valid, order[cutoff:])):
            for idx in indices:
                dataset.addSample(features.values[idx], targets.values[idx])
        return ds_train, ds_valid
Ejemplo n.º 23
0
class NeuralNet(object):
    """Thin sklearn-style wrapper (fit/predict) around a pybrain network."""

    def __init__(self, layers):
        # `layers` lists the unit counts per layer, input first, output last.
        self.layers = layers
        self.ds = None
        self.train_error = []
        self.test_error = []
        self.norm_error = []

    def improve(self, n=10):
        """Run `n` further backprop epochs, recording each epoch's error."""
        trainer = BackpropTrainer(self.nn, self.ds)
        for _ in xrange(n):
            self.train_error.append(trainer.train())

    def fit(self, X, y):
        """Build the network and dataset from (X, y), then train."""
        self.nn = buildNetwork(*self.layers, bias=True, hiddenclass=SigmoidLayer)
        self.ds = SupervisedDataSet(self.layers[0], self.layers[-1])
        for idx, sample in enumerate(X):
            self.ds.addSample(sample.tolist(), y[idx])
        self.improve()

    def predict(self, X):
        """Return the network's activation for every row of X as an array."""
        outputs = [self.nn.activate(sample) for sample in X.tolist()]
        return numpy.array(outputs)
Ejemplo n.º 24
0
def NeuralNetwork(tRiver, qRiver, pRiver, TRiver, qnewRiver, pnewRiver, TnewRiver):
    # build neural network with 20 neurons for historic data on flux, 3 for last 3 temp data, 3 for last precipitation,
    # hidden layer with more than input neurons (hinder specification)
    # and 3 output neurons (flux for next day, first derivative, second derivative

    Ndim = 10+3+3
    Nout = 3
    net = buildNetwork(Ndim, Ndim, Nout, hiddenclass=TanhLayer)
    ds = SupervisedDataSet(Ndim, Nout)

    # next big job: find data values to build up library of training set
    for t in range(len(tRiver)-3):
        # NOTE(review): qRiver[t-20:2:t] is start=t-20, stop=2, step=t -- this
        # is almost certainly not the intended "last 20 days" window
        # (qRiver[t-20:t:2] would yield the 10 values Ndim expects); confirm.
        input_flow = qRiver[t-20:2:t]
        input_prec = pRiver[t-3:t]
        input_temp = TRiver[t-3:t]
        input_vec = np.hstack([input_flow, input_prec, input_temp])

        output_flow = np.hstack([qRiver[t:t+3]]) # first approx, split later for long predictions
        ds.addSample(input_vec, output_flow)

    trainer = BackpropTrainer(net, ds)
    trainer.trainUntilConvergence()

    # now call it repeatedly on the second set
    # NOTE(review): this passes 20+3+3 = 26 inputs to a network built with
    # Ndim = 16 inputs -- one of the two sizes must be wrong; confirm.
    prediction = net.activate(np.hstack([qnewRiver[:20], pnewRiver[:3], TnewRiver[:3]]))
    return prediction
Ejemplo n.º 25
0
    def handle(self, *args, **options):
        """Train a preference network on better/worse image pairs.

        Each BetterThan pair contributes two samples: (better, worse) -> -1
        and (worse, better) -> 1.
        """
        better_thans = BetterThan.objects.all() #.filter(pk__lte=50)

        ds = SupervisedDataSet(204960, 1)
        for better_than in better_thans:
            bt = imread(better_than.better_than.image.file)
            wt = imread(better_than.worse_than.image.file)
            better_than.better_than.image.file.close()
            better_than.worse_than.image.file.close()

            # bt = filters.sobel(bt)
            # wt = filters.sobel(wt)

            # flatten each image, then concatenate the pair in both orders
            bt_input_array = np.reshape(bt, (bt.shape[0] * bt.shape[1]))
            wt_input_array = np.reshape(wt, (wt.shape[0] * wt.shape[1]))
            input_1 = np.append(bt_input_array, wt_input_array)  # NOTE(review): unused
            input_2 = np.append(wt_input_array, bt_input_array)  # NOTE(review): unused
            ds.addSample(np.append(bt_input_array, wt_input_array), [-1])
            ds.addSample(np.append(wt_input_array, bt_input_array), [1])
        
        net = buildNetwork(204960, 2, 1)

        # NOTE(review): both splits are computed but the trainer below is fed
        # the full dataset `ds`; train_ds/test_ds are never used -- confirm.
        train_ds, test_ds = ds.splitWithProportion(options['train_test_split'])
        _, test_ds = ds.splitWithProportion(options['test_split'])

        trainer = BackpropTrainer(net, ds)

        # before/after spot check on the arrays left over from the final loop
        # iteration (only the last pair is checked)
        print 'Looking for -1: {0}'.format(net.activate(np.append(bt_input_array, wt_input_array)))
        print 'Looking for 1: {0}'.format(net.activate(np.append(wt_input_array, bt_input_array)))

        trainer.train()

        print 'Looking for -1: {0}'.format(net.activate(np.append(bt_input_array, wt_input_array)))
        print 'Looking for 1: {0}'.format(net.activate(np.append(wt_input_array, bt_input_array)))
def main():
	"""Train a 178->6->5 personality-trait regressor and report test errors.

	Relies on module-level globals: `test` (id -> description mapping) and
	`traits` (five trait names) -- TODO confirm their definitions.
	"""
	inputs = ReadCSV('./data/input.csv')
	outputs = ReadCSV('./data/output.csv')
	
	# Every id not reserved for testing is used for training.
	test_set = test.keys()
	train_set = []
	for k in inputs.keys():
		if k not in test_set:
			train_set.append(k)
	print "Number of training samples", len(train_set)
	print "Number of testing samples", len(test_set)
			
	net = buildNetwork(178, 6, 5)
	ds=SupervisedDataSet(178,5)
	for id in train_set:
		ds.addSample(inputs[id],outputs[id])

	trainer = BackpropTrainer(net, ds, learningrate=0.001, momentum = 0.001)

	trainer.trainUntilConvergence(maxEpochs=1000, validationProportion = 0.5)
	
	
	for id in test_set:
		predicted = net.activate(inputs[id])
		actual = outputs[id]
		print '-----------------------------'
		print test[id]
		print '-----------------------------'
		print 'Trait\t\tPredicted\tActual\tError'
		for i in range(0,5):
			# scale absolute error to a percentage of the 0-4 trait range
			error = abs(predicted[i] - actual[i])*100/4.0
			print traits[i], '\t', predicted[i], '\t', actual[i], '\t', error,"%" 
	def __init__(self, histogram_list):
		"""Train a face-vs-noise classifier from 1024-bin histograms.

		:param histogram_list: iterable of histograms of known faces (label 1)
		"""
		self.net = buildNetwork(1024, 100, 1)

		ds = SupervisedDataSet(1024, 1)

		# positive examples: every provided histogram is a face
		for histogram in histogram_list:
			ds.addSample(histogram, (1,))
		
		for x in range(0,15):
			ds.addSample(numpy.random.random((1024)) * 255, (0,)) # this noise should never be a face
		
		trainer = BackpropTrainer(self.net, ds)

		#trainer.trainUntilConvergence()

		
		# 2000 manual epochs, printing the error each time
		for x in range(2000):
			print ("count:\t" + str(x) + "\terror:\t" + str(trainer.train()))
		
		print (trainer.train())

		"""
Ejemplo n.º 28
0
 def train(self, training_files, learningrate=0.01, scaling=True, noise=False, verbose=True):
     """Train the action network from driving log files.

     :param training_files: iterable of log paths with "Received:" sensor
         lines and "Sending:" action lines
     :param learningrate: unused by the RProp- trainer actually built below --
         TODO confirm (it was for the commented-out BackpropTrainer)
     :param scaling: center the inputs with a StandardScaler before training
     :param noise: also add a noised copy of every sample
     :param verbose: forwarded to the trainer
     """
     print "building dataset..."
     ds = SupervisedDataSet(SensorModel.array_length(self.sensor_ids), 1)
     # read training file line, create sensormodel object, do backprop
     a = None
     s = None
     for logfile in training_files:
         print "loading file", logfile
         with open(logfile) as f:
             for line in f:
                 if line.startswith("Received:"):
                     s = SensorModel(string=line.split(' ', 1)[1])
                 elif line.startswith("Sending:"):
                     a = Actions.from_string(string=line.split(' ', 1)[1])
                 # only emit a sample once a sensor reading and action pair up
                 if s is not None and a is not None:
                     ds.addSample(inp=s.get_array(self.sensor_ids), target=a[self.action_ids[0]])
                     if noise:
                         # add the same training sample again but with noise in the sensors
                         s.add_noise()
                         ds.addSample(inp=s.get_array(self.sensor_ids), target=a[self.action_ids[0]])
                     s = None
                     a = None
     print "dataset size:", len(ds)
     if scaling:
         print "scaling dataset"
         # center inputs only (with_std=False); targets pass through unchanged
         self.scaler_input = StandardScaler(with_mean=True, with_std=False).fit(ds.data['input'])
         ds.data['input'] = self.scaler_input.transform(ds.data['input'])
         ds.data['target'] = ds.data['target']
     #self.trainer = BackpropTrainer(self.net, learningrate=learningrate, verbose=verbose)
     self.trainer = RPropMinusTrainer(self.net, verbose=verbose, batchlearning=True)
     print "training network..."
     self.trainer.trainUntilConvergence(dataset=ds, validationProportion=0.25, maxEpochs=10, continueEpochs=2)
def move_function(board):
    """Pick a move for the active player, nudge the global net toward the
    observed best value with one backprop step, checkpoint it, and return
    the chosen move.  The network value estimates black's chance of winning,
    so black maximizes and white minimizes."""
    global net  
    best_max_move = None 
    max_value = -1000
    best_min_move = None
    min_value = 1000

    #value is the chance of black winning
    for m in board.get_moves():
        nextboard = board.peek_move(m)
        value = net.activate(board_to_input(nextboard))
        if value > max_value: 
            max_value = value
            best_max_move = m 
        if value < min_value:
            min_value = value
            best_min_move = m

    # single-sample dataset: current position -> best achievable value
    ds = SupervisedDataSet(97, 1)
    best_move = None 

    #active player
    if board.active == BLACK:
        ds.addSample(board_to_input(board), max_value)
        best_move = best_max_move
    elif board.active == WHITE: 
        ds.addSample(board_to_input(board), min_value)
        best_move = best_min_move

    # one training step, then persist the network to both checkpoint paths
    trainer = BackpropTrainer(net, ds)
    trainer.train()
    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml') 
    return best_move 
Ejemplo n.º 30
0
def getErrorPercent(training_dataset, eval_dataset_list, num_hidden, num_epochs):
  """Train a sigmoid MLP on training_dataset, then return the average
  per-output percent error on each evaluation set.

  training_dataset:  sequence of (input_vector, output_vector) pairs
  eval_dataset_list: list of such sequences to score after training
  num_hidden:        number of hidden sigmoid units
  num_epochs:        number of backprop epochs

  Returns a list (one entry per eval set) of per-output average errors.
  """
  num_inputs = len(training_dataset[0][0])
  num_outputs = len(training_dataset[0][1])

  NN = buildNetwork(num_inputs, num_hidden, num_outputs, bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer)

  dataset = SupervisedDataSet(num_inputs, num_outputs)
  for datapoint in training_dataset:
    dataset.addSample(datapoint[0], datapoint[1])

  trainer = BackpropTrainer(NN, dataset=dataset, momentum=0.0, verbose=False, weightdecay=0.0)

  for _ in range(num_epochs):
    trainer.train()

  errors = []
  for eval_set in eval_dataset_list:
    total_percent_errors = [0] * num_outputs
    for jj in range(len(eval_set)):
      nn_out = NN.activate(eval_set[jj][0])
      percent_error = computeError(eval_set[jj][1], nn_out)
      # Element-wise accumulation (replaces py2-only map(operator.add, ...)).
      total_percent_errors = [a + b for a, b in zip(percent_error, total_percent_errors)]
    # BUG FIX: averages were divided by len(dataset) -- the TRAINING set
    # size -- instead of the size of the evaluation set actually scored.
    # (Also replaces py2-only operator.div with explicit float division.)
    errors.append([total / float(len(eval_set)) for total in total_percent_errors])
  return errors
Ejemplo n.º 31
0
    ]])
# Attach the label column as the regression target of the training set.
ds.setField('target', training['label_tag'])

print("---------------make test data-------------------------------------")

# Prediction set: 8 aggregated behaviour features, dummy target of 0.
out = SupervisedDataSet(8, 1)

# Keep only items present both in the training items and in the
# user/week-iteration table.
test_item_pre = pd.merge(train_item_df,
                         test_user_weekiter,
                         on=['item_id', 'item_category'],
                         how='inner')

# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas;
# .loc would be the present-day equivalent -- confirm the pandas version.
for i_ter in test_item_pre.index:
    out.addSample(
        test_item_pre.ix[i_ter, [
            'cat_view', 'cat_cart', 'cat_mark', 'cat_bought', 'view_tag',
            'mark_tag', 'bought_tag', 'cart_tag'
        ]], 0)

print("--------------predict length------------------------------------")
print(np.shape(test_item_pre)[0])

print("----------------start training-------------------------------------")
# Backprop with pybrain's internal train/validation split, capped at 5 epochs.
trainer = BackpropTrainer(net, ds)
trainer.trainUntilConvergence(maxEpochs=5)

print(
    "--------------------predict-----------------------------------------------"
)
# Network outputs for every sample of the prediction set.
pre = net.activateOnDataset(out)
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
# BUG FIX: the module exports 'SigmoidLayer' (CamelCase); importing
# 'sigmoidlayer' raised ImportError and aborted the whole script.
from pybrain.structure.modules import SigmoidLayer

# 2 inputs -> 3 hidden units -> 1 output.
rede = buildNetwork(2, 3, 1)
# print(rede['in'])
# print(rede['hidden0'])
# print(rede['out'])
# print(rede['bias'])

# XOR truth table as the training set.
base = SupervisedDataSet(2, 1)
base.addSample((0, 0), (0, ))
base.addSample((1, 0), (1, ))
base.addSample((0, 1), (1, ))
base.addSample((1, 1), (0, ))

treinamento = BackpropTrainer(rede,
                              dataset=base,
                              learningrate=0.01,
                              momentum=0.06)

# Train for 29999 epochs, reporting the error every 1000th epoch.
for i in range(1, 30000):
    erro = treinamento.train()
    if i % 1000 == 0:
        # BUG FIX: the original printed the literal text 'Erro {erro}'
        # because the string was never formatted (missing f-prefix).
        print('Erro {}'.format(erro))

print(rede.activate([0, 0]))
print(rede.activate([1, 0]))
print(rede.activate([0, 1]))
         standardizer(enh_array[1:, 3]), standardizer(enh_array[1:, 4]),
         standardizer(enh_array[1:, 5])))

    #this is meant to
    r_lengths = [[i, i + 12] for i in xrange(0, 12 * 100 * 5, 12)]
    #print r_lengths[-1]
    #print len(enh_array)
    #dataset without time
    #print enh_array

    for j in r_lengths:
        temp_data = enh_array[j[0]:j[1]]
        for k in xrange(len(temp_data) - 1):
            ds2.addSample(
                (temp_data[k][0], temp_data[k][1], temp_data[k][2],
                 temp_data[k][3], temp_data[k][4]),
                (temp_data[k + 1][1] - temp_data[k][1], temp_data[k + 1][2] -
                 temp_data[k][2], temp_data[k + 1][3] - temp_data[k][3]))
            #in this one only 4 inputs.

    #both types of ANN will be run, and with 1 and 2 hidden layers

net2 = buildNetwork(
    5,
    20,  # number of hidden units
    3,
    bias=True,
    hiddenclass=TanhLayer,
    outclass=LinearLayer)

net4 = buildNetwork(
Ejemplo n.º 34
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# 2 inputs -> 3 hidden -> 1 output, with bias units.
neuralNetwork = buildNetwork(2, 3, 1, bias=True)

dataset = SupervisedDataSet(2, 1)

# NOTE(review): these targets are XNOR (equality), although the final
# banner says "XOR" -- confirm which gate was intended.
dataset.addSample((0, 0), (1))
dataset.addSample((1, 0), (0))
dataset.addSample((0, 1), (0))
dataset.addSample((1, 1), (1))

# BUG FIX: BackpropTrainer takes 'learningrate' (all lowercase); the
# camel-case keyword 'learningRate' raised a TypeError at construction.
trainer = BackpropTrainer(neuralNetwork,
                          dataset=dataset,
                          learningrate=0.01,
                          momentum=0.06)

for i in range(1, 10000):
    error = trainer.train()

    # BUG FIX: 'i % 10000 == 0' was unreachable because range(1, 10000)
    # stops at 9999, so the progress report was dead code; report every
    # 1000 epochs instead.
    if i % 1000 == 0:
        print("Error in iteration ", i, " is: ", error)
        print(neuralNetwork.activate([0, 0]))
        print(neuralNetwork.activate([1, 0]))
        print(neuralNetwork.activate([0, 1]))
        print(neuralNetwork.activate([1, 1]))

print("\n\nFinal result of XOR:\n")
print(neuralNetwork.activate([0, 0]))
print(neuralNetwork.activate([1, 0]))
Ejemplo n.º 35
0
__author__ = 'rin'

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import TanhLayer
# 2x3x1 topology
#net = buildNetwork(2,3,1)
# 2-dimensional input, 1-dimensional output: XOR targets.
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0, ))
ds.addSample((0, 1), (1, ))
ds.addSample((1, 0), (1, ))
ds.addSample((1, 1), (0, ))

# Same inputs with OR targets for the second network.
ds2 = SupervisedDataSet(2, 1)
ds2.addSample((0, 0), (0, ))
ds2.addSample((0, 1), (1, ))
ds2.addSample((1, 0), (1, ))
ds2.addSample((1, 1), (1, ))

#for inpt, target in ds:
#    print inpt, target

#print(net.activate([2, 1]))
net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)
net2 = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)

trainer = BackpropTrainer(net, ds)
# BUG FIX: trainer2 was constructed with 'net', leaving 'net2' untrained
# and pointing two trainers at the same network; it now trains net2 on ds2.
trainer2 = BackpropTrainer(net2, ds2)
##trainer.train()
Ejemplo n.º 36
0
    # Load and prepare training and validation files
    train_f = 'housing-training.csv'
    validation_f = 'housing-validation.csv'
    train_data = np.loadtxt(train_f, delimiter=',')
    validation_data = np.loadtxt(validation_f, delimiter=',')
    # Stack both splits: the network below is trained on the combined data.
    data = np.vstack((train_data, validation_data))
    X = data[::, :-1]  # feature vectors (every column but the last)
    y = data[::, -1]  # target vector (last column)
    y = y.reshape(-1, 1)  # transform shape to (n,1) for training

    # Prepare the dataset for pybrain
    input_size = len(X[0])
    target_size = len(y[0])
    ds = SupervisedDataSet(input_size, target_size)
    for inpt, target in zip(X, y):
        ds.addSample(inpt, target)

    # Build the pybrain network
    # source (http://pybrain.org/docs/quickstart/network.html)
    # NOTE(review): n_input hard-codes what input_size already computed;
    # the original comment claimed "12 features" but the value is 13.
    n_input = 13  # 13 features so 13 input neurons
    n_hidden = 100  # Network spec: 100 hidden units
    n_output = 1  # 1 target so a single output neuron
    net = buildNetwork(n_input, n_hidden, n_output)

    # Train the network on the dataset, printing each epoch's error.
    trainer = BackpropTrainer(net, ds)
    for epoch in range(1000):
        print(trainer.train())

    # Save model via pickle
    output_file = 'model.pkl'
Ejemplo n.º 37
0
print "minim standarized value: %f" % minim

d1 = 0
d2= 1
print "normalizando datos..."
for d in range(0, 171, 1):
	normalized_data.append(((standarized_data[d] - minim)* (d2 - d1) / (maxim - minim)) + d1)

# print normalized_data
# ################################       https://www.mql5.com/es/articles/497    ##################################3
# ################################       http://es.slideshare.net/mentelibre/diseo-y-entrenamiento-de-redes-neuronales-artificiales    ##################################3

#creating the data set
trainingSet = SupervisedDataSet(6, 1)
for i in range(0, 113, 1):
	trainingSet.addSample((normalized_data[i], normalized_data[i+1], normalized_data[i+2],
						normalized_data[i+3], normalized_data[i+4], normalized_data[i+5]), (normalized_data[i+6]))#116 tuples

# print "trainginSet"
# for inpt, target in trainingSet:
# 	print inpt, target

ds = SupervisedDataSet(6, 1)
for i in range(119, 159, 1):
	ds.addSample((normalized_data[i], normalized_data[i+1], normalized_data[i+2],
				normalized_data[i+3], normalized_data[i+4], normalized_data[i+5]), (normalized_data[i+6]))#44 tuples

# print "dataset"
# for inpt, target in ds:
# 	print inpt, target

net = buildNetwork(6, 3, 1, bias = True, hiddenclass = SigmoidLayer)
Ejemplo n.º 38
0
from __future__ import print_function
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

# Three one-dimensional samples, each mapped to a one-hot class vector.
data_set = SupervisedDataSet(1, 3)
for features, one_hot in (((0.120, ), [0, 0, 1]),
                          ((0.80, ), [0, 1, 0]),
                          ((0.70, ), [1, 0, 0])):
    data_set.addSample(features, one_hot)

# 1 input -> 4 hidden -> 3 outputs, with bias units.
neural_network = buildNetwork(1, 4, 3, bias=True)
trainer = BackpropTrainer(neural_network, data_set)

# Print the training error for each of the 2000 epochs.
for _ in range(2000):
    print(trainer.train())

# Interactive loop: classify user-supplied values until interrupted.
while True:
    param_1 = float(raw_input("Param 1: \n"))
    resp = neural_network.activate((param_1, ))
    print("FOI: ", resp)
Ejemplo n.º 39
0
 a = lambda x: exp(multiply(-1, sqrt(x)))
 b = lambda x: arctan(x)
 network1 = buildNetwork(
     1, 3,
     1) if not isfile("./Q2A.xml") else NetworkReader.readFrom("./Q2A.xml")
 network2 = buildNetwork(
     1, 3,
     1) if not isfile("./Q2B.xml") else NetworkReader.readFrom("./Q2B.xml")
 if not isfile("./Q2A.xml"):
     NetworkWriter.writeToFile(network1, "./Q2A.xml")
 if not isfile("./Q2B.xml"):
     NetworkWriter.writeToFile(network2, "./Q2B.xml")
 trainingSets1 = SupervisedDataSet(1, 1)
 trainingSets2 = SupervisedDataSet(1, 1)
 [
     trainingSets1.addSample(trainingSetsA[i], a(trainingSetsA[i]))
     for i in range(len(trainingSetsA))
 ]
 [
     trainingSets2.addSample(trainingSetsB[i], b(trainingSetsB[i]))
     for i in range(len(trainingSetsB))
 ]
 trainer1 = BackpropTrainer(network1, trainingSets1, learningrate=0.1)
 trainer2 = BackpropTrainer(network2, trainingSets2, learningrate=0.1)
 trainer1.trainUntilConvergence()
 trainer2.trainUntilConvergence()
 trainOutputA = []
 trainOutputB = []
 validateOutputA = []
 validateOutputB = []
 errA = []
Ejemplo n.º 40
0
# Here we define the data model for the XOR function.
# Each sublist [Inputs, Output] consists of the following tuples:
# Inputs: (x, y) and Output: (x XOR y,).
dataModel = [[(0, 0), (0, )], [(0, 1), (1, )], [(1, 0), (1, )], [(1, 1),
                                                                 (0, )]]

# Instantiating the Network: 2 inputs, 2 hidden units, 1 output, with bias.
annet = buildNetwork(2, 2, 1, bias=True)

# To view the structure of the Network.
print annet

# Creation of the default dataset for backpropagation.
datset = SupervisedDataSet(2, 1)
for inputs, target in dataModel:
    datset.addSample(inputs, target)

# Creation of the training dataset which would be used to train the network till convergence.
training_set = SupervisedDataSet(2, 1)

# Draw 1000 samples uniformly (with replacement) from the 4 XOR rows.
for iter in xrange(1000):
    # Randomly selecting input-output records from the 4 possible, valid inputs for the XOR gate.
    rand_val = rnd.randint(0, 3)
    inputs, target = dataModel[rand_val]
    training_set.addSample(inputs, target)

# Setting up the trainer which utilizes Back-Propagation technique. Here 'datset' simply serves as the default dataset.
# It won't be utilized for training the network.
trainer = BackpropTrainer(annet, datset, learningrate=0.01, momentum=0.9)

# Training the set for 15 max. learning cycles till convergence using training_set as training data.
Ejemplo n.º 41
0
                rapdict.append([str(i), int(syllablesentencecount(str(i))), rhyme_master_list.index(rhymeschemeofsentence(i))])
            except Exception:
                print "Hm, for some reason we couldn't do anything with this line - remove symbols from it and try again: " + str(i)
dictionarybuilder()
print rapdict

# makes a dataset
ds = SupervisedDataSet(4,4)
# the dataset is in the form of the amount of syllables and rhyme scheme of TWO lines that are next to each other in the song.


for i in rapdict[:-3]:
    if i != "" and rapdict[rapdict.index(i) + 1] != "" and rapdict[rapdict.index(i) + 2] != "" and rapdict[rapdict.index(i) + 3] != "":
        twobars = [i[1], i[2], rapdict[rapdict.index(i) + 1][1], rapdict[rapdict.index(i) + 1][2], rapdict[rapdict.index(i) + 2][1], rapdict[rapdict.index(i) + 2][2], rapdict[rapdict.index(i) + 3][1], rapdict[rapdict.index(i) + 3][2]]
        print twobars
        ds.addSample((twobars[0] / float(20), int(twobars[1]) / float(len(rhyme_master_list)), twobars[2] / float(20), int(twobars[3]) / float(len(rhyme_master_list))), (twobars[4] / float(20), int(twobars[5]) / float(len(rhyme_master_list)), twobars[6] / float(20), int(twobars[7]) / float(len(rhyme_master_list))))

print ds

# Only uncomment this if you are training it on lyrics yourself.
# this part gets a neural network, trains it on lyrics and syllables and then saves it.
"""
net = buildNetwork(4,6,6,6,4,recurrent=True)
t = BackpropTrainer(net,learningrate=0.05,momentum=0.5,verbose=True)
t.trainOnDataset(ds,100)
t.testOnData(verbose=True)

fileObject = open('trained_net', 'w')
pickle.dump(net, fileObject)
fileObject.close()
"""
Ejemplo n.º 42
0
        for i in xrange(window_s, window_s + INPUT_LEN):
            if prev != 0:
                input_rates.append(exchange_rates[i][1] - prev)
            else:
                input_rates.append(0)
            prev = exchange_rates[i][1]
        for i in xrange(window_s + INPUT_LEN, window_s + INPUT_LEN + OUTPUT_LEN):
            output_rates.append(exchange_rates[i][1])


        y_arr = np.array(output_rates)
        angle = np.polyfit(x_arr, y_arr, 1)[0]
        #    print "learn_angle " + str(angle)
#        print "add angle" + str(angle)
#        print "add input_rates len " + str(len(input_rates))
        ds.addSample(input_rates, [angle])

    trainer = BackpropTrainer(rnn_net, **parameters)
    trainer.setData(ds)
    trainer.train()

    del ds  # release memory

    # predict
    rnn_net.reset()

    dump_fd = open("./rnn_net.dump", "w")
    pickle.dump(rnn_net, dump_fd)
### training end

# frslt = open('../test/rnn_result8.csv', 'w')
# Aprovado
for i in range(1, 8, 1):
    if i < 10:
        nome = "Aprovado/M0" + str(i) + ".wav"
    else:
        nome = "Aprovado/M" + str(i) + ".wav"

    fs, data = wavfile.read(nome)
    p = 20 * np.log10(np.abs(np.fft.rfft(data)))
    # GraficoDbxFreq(fs,p)
    b = p.tolist()
    a = []
    for j in range(3000):
        a = a + [sum(b[(10 * j):(10 * (j + 1))]) / 10]

    base.addSample(tuple(a), (1))

# Negado
for i in range(1, 18, 1):
    if i < 10:
        nome = "Negado/0" + str(i) + ".wav"
    else:
        nome = "Negado/" + str(i) + ".wav"

    fs, data = wavfile.read(nome)
    p = 20 * np.log10(np.abs(np.fft.rfft(data)))
    #f = np.linspace(0, fs/2.0, len(p))
    b = p.tolist()
    a = []
    for j in range(3000):
        a = a + [sum(b[(10 * j):(10 * (j + 1))]) / 10]
from pybrain.structure import TanhLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork


def pesos_conexiones(n):
    for mod in n.modules:
        for conn in n.connections[mod]:
            print conn
            for cc in range(len(conn.params)):
                print conn.whichBuffers(cc), conn.params[cc]


ds = SupervisedDataSet(2, 1)

ds.addSample((0, 0), 0)
ds.addSample((0, 100), 1)
ds.addSample((1, 0), 1)
ds.addSample((1, 1), 0)

net = buildNetwork(2, 3, 1, bias=True, hiddenclass=TanhLayer)
trainer = BackpropTrainer(net, ds)

print trainer.trainEpochs(1000)

print net['hidden0']

net.activate([2, 1])
print net['hidden0']

print '\n\n\n\n\n'
Ejemplo n.º 45
0
def random_data(dataset):
    """Return a new SupervisedDataSet containing the samples of `dataset`
    in a uniformly random order (targets are 2-dimensional)."""
    shuffled = SupervisedDataSet(dataset['input'].shape[1], 2)
    shuffled.clear()
    order = np.random.permutation(len(dataset))
    for idx in order:
        shuffled.addSample(dataset['input'][idx], dataset['target'][idx])
    return shuffled
Ejemplo n.º 46
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

neuralNetwork = buildNetwork(25, 15, 10, bias=True)

dataset = SupervisedDataSet(25, 10)
# entenda o 0 e que nao esta pintada e 1 esta pintada
#a rede vai sair analizando aparti dos dados colocados como pintado ou nao pintados , professor observe a imagem la.
dataset.addSample((0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0,
                   0, 1, 1, 1, 0), (1, 0, 0, 0, 0, 0, 0, 0, 0, 0))  #0
dataset.addSample((0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0,
                   0, 0, 1, 0, 0), (0, 1, 0, 0, 0, 0, 0, 0, 0, 0))  #1
dataset.addSample((0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0,
                   0, 1, 1, 1, 0), (0, 0, 1, 0, 0, 0, 0, 0, 0, 0))  #2
dataset.addSample((0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0,
                   0, 1, 1, 1, 0), (0, 0, 0, 1, 0, 0, 0, 0, 0, 0))  #3
dataset.addSample((0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,
                   0, 0, 0, 1, 0), (0, 0, 0, 0, 1, 0, 0, 0, 0, 0))  #4
dataset.addSample((0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,
                   0, 1, 1, 1, 0), (0, 0, 0, 0, 0, 1, 0, 0, 0, 0))  #5
dataset.addSample((0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0,
                   0, 1, 1, 1, 0), (0, 0, 0, 0, 0, 0, 1, 0, 0, 0))  #6
dataset.addSample((0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0,
                   0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 0, 1, 0, 0))  #7
dataset.addSample((0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0,
                   0, 1, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 1, 0))  #8
dataset.addSample((0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0,
                   0, 1, 1, 1, 0), (0, 0, 0, 0, 0, 0, 0, 0, 0, 1))  #9
#ai esta a representaçao do numeros
Ejemplo n.º 47
0
    for letter in word:
        if letter in voiceless_con:
            embedding_data[3] = 1.0
        if letter in voiced_con:
            embedding_data[4] = 1.0
    return embedding_data


train_path = 'training.txt'
train_wordlist, inputs, outputs = load_data(train_path, '\t')
# print(inputs)
# print(outputs)

dataset = SupervisedDataSet(5, 6)
for index in range(len(inputs)):
    dataset.addSample(tuple(inputs[index]), tuple(outputs[index]))

network = buildNetwork(5, 6, 6)
trainer = BackpropTrainer(network, dataset)

#
# for j in range(200):
#     trainer.train()

err_train, err_valid = trainer.trainUntilConvergence(maxEpochs=500)
plt.plot(err_train, 'b', err_valid, 'r')
plt.show()
# close the plot to continue the program.

test_path = 'test.txt'
test_wordlist, test_input, english_wordlist = load_test(test_path, '\t')
Ejemplo n.º 48
0
magic5 = [8, 9, 17, 18, 20]
magic6 = [9, 14, 18, 19, 20]
magic7 = [9, 17, 18, 19, 20]
magic8 = [14, 17, 18, 19, 20]
magic9 = [9, 10, 12, 13, 15]

for tuple in data_training_raw:
    tuple_data = []
    output_tmp1 = []
    for i in range(6, 21):
        if i == magic1[0] or i == magic1[1] or i == magic1[2] or i == magic1[
                3] or i == magic1[4]:
            output_tmp1.append(float(tuple[i]))
        else:
            tuple_data.append(float(tuple[i]))
    ds1.addSample(tuple_data, output_tmp1)
for tuple in data_training_raw:
    output_tmp2 = []
    tuple_data = []
    for i in range(6, 21):
        if i == magic2[0] or i == magic2[1] or i == magic2[2] or i == magic2[
                3] or i == magic2[4]:
            #here
            output_tmp2.append(float(tuple[i]))
        else:
            tuple_data.append(float(tuple[i]))
    #here                                   #here
    ds2.addSample(tuple_data, output_tmp2)
for tuple in data_training_raw:
    tuple_data = []
    output_tmp3 = []
Ejemplo n.º 49
0
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure import SigmoidLayer
from pybrain.datasets import SupervisedDataSet

# 400 pixel inputs (20x20 images), 25 hidden sigmoid units, 10 class outputs.
net = buildNetwork(400,25,10,bias=True,hiddenclass=SigmoidLayer)

import numpy
import scipy
from scipy.io import loadmat as loadmat

# Handwritten-digit data in MATLAB format: X holds the images, y the labels.
data = loadmat("ex4data1.mat")
X = data['X']
y = data['y']

ds = SupervisedDataSet(400,10)

# NOTE(review): 'y[idx] == y[idx][0]' compares a 1-element row with its own
# first entry, so the target is always [True]; this looks like a broken
# attempt at one-hot encoding the label over the 10 outputs -- confirm.
for idx,v in enumerate(X):
    ds.addSample(v,y[idx] == y[idx][0])

print len(ds),net
trainer = BackpropTrainer(net, ds)

# 100 epochs of backprop, printing the per-epoch error.
for i in range(100):
    print trainer.train()

print net.params,net.params.shape
#print trainer.trainUntilConvergence()
Ejemplo n.º 50
0

if __name__ == '__main__':

    from pylab import figure, show

    # --- example on how to use the GP in 1 dimension
    ds = SupervisedDataSet(1, 1)
    gp = GaussianProcess(indim=1, start=-3, stop=3, step=0.05)
    figure()

    x = mgrid[-3:3:0.2]
    y = 0.1 * x**2 + x + 1
    z = sin(x) + 0.5 * cos(y)

    ds.addSample(-2.5, -1)
    ds.addSample(-1.0, 3)
    gp.mean = 0

    # new feature "autonoise" adds uncertainty to data depending on
    # it's distance to other points in the dataset. not tested much yet.
    # gp.autonoise = True

    gp.trainOnDataset(ds)
    gp.plotCurves(showSamples=True)

    # you can also test the gp on single points, but this deletes the
    # original testing grid. it can be restored with a call to _buildGrid()
    print((gp.testOnArray(array([[0.4]]))))

    # --- example on how to use the GP in 2 dimensions
Ejemplo n.º 51
0
class NeuralNet():
    """Wrapper around a pybrain feed-forward network and its supervised
    dataset, with helpers to get/set/load/save the parameter vector and to
    generate synthetic 2-D training data."""

    def __init__(self, inputDim, outputDim):
        '''
        Builds the network and an empty dataset.

        Input:    -inputDim: number of input neurons
                  -outputDim: number of output neurons
        '''
        self.inputDimension = inputDim
        self.outputDimension = outputDim
        # Only two layer sizes are given, so the input layer is wired
        # directly to the output layer (no hidden layer).
        self.net = buildNetwork(inputDim, outputDim)
        self.ds = SupervisedDataSet(self.inputDimension, self.outputDimension)

        print "dimensions : " + str(self.inputDimension) + "x" + str(
            self.outputDimension)

    def setTheta(self, theta):
        '''Overwrites the network parameter vector with theta.'''
        self.net._setParameters(theta)

    def getTheta(self):
        '''Returns the current network parameter vector.'''
        return self.net.params

    def loadTheta(self, thetaFile):
        '''Loads the parameter vector from a numpy text file and returns it.'''
        self.net._setParameters(np.loadtxt(thetaFile))
        #print ("theta LOAD : ", self.net.params)
        return self.net.params

    def saveTheta(self, fileName):
        '''
        Records theta under numpy format

        Input:    -fileName: name of the file where theta will be recorded
        '''
        np.savetxt(fileName, self.net.params)

    def getTrainingData(self):
        '''Fills the dataset with a 200x200 grid over [0,1)^2: the target
        is noise in [1.5, 2) inside the upper-right quadrant (x>0.5 and
        y>0.5) and noise in [0, 0.5) everywhere else.'''
        for i in range(200):
            for j in range(200):
                output = []
                x = i / 200.0
                y = j / 200.0
                if x > 0.5 and y > 0.5:
                    output.append(rd.random() / 2 + 1.5)
                else:
                    output.append(rd.random() / 2)
                self.ds.addSample([x, y], output)

    def getTrainingData2(self):
        '''Fills the dataset with random points instead of a grid: 2000
        points with low targets, plus 1000 upper-right-quadrant points with
        high targets.'''
        for i in range(2000):
            output = []
            x = rd.random()
            y = rd.random() / 2.0
            # y < 0.5 here, so the first branch can never be taken; kept
            # as-is to preserve the original RNG call sequence.
            if x > 0.5 and y > 0.5:
                output.append(rd.random() / 2 + 1.5)
            else:
                output.append(rd.random() / 2)
            self.ds.addSample([x, y], output)
        for i in range(1000):
            output = []
            x = 0.5 + rd.random() / 2.0
            y = 0.5 + rd.random() / 2.0
            output.append(rd.random() / 2 + 1.5)
            self.ds.addSample([x, y], output)

    def train(self):
        '''
        Perform batch regression: generates the synthetic training data,
        then runs a single backpropagation pass over the dataset.
        '''
        self.getTrainingData2()
        trainer = BackpropTrainer(self.net, self.ds)
        trainer.train()

    def computeOutput(self, inputVal):
        '''
        Returns the output depending on the given input and theta

        Input:      -inputVal: numpy N-D array
                    -theta: numpy N-D array

        Output:     -fa_out: numpy N-D array, output approximated
        '''
        assert (inputVal.shape[0] == self.inputDimension
                ), "NeuralNet: Bad input format"
        return self.net.activate(inputVal)
Ejemplo n.º 52
0
        else:
            d += 1
        x = [0.0 for _ in xrange(NUM_FEATURES)]
        players = record['players']
        for player in players:
            hero_id = player['hero_id'] - 1

            # If the left-most bit of player_slot is set,
            # this player is on dire, so push the index accordingly
            player_slot = player['player_slot']
            if player_slot >= 128:
                hero_id += NUM_HEROES

            x[hero_id] = 1.0

        ds.addSample(x, y)

        y = 1.0 - y
        x = [0.0 for _ in xrange(NUM_FEATURES)]
        players = record['players']
        for player in players:
            hero_id = player['hero_id'] - 1

            # If the left-most bit of player_slot is set,
            # this player is on dire, so push the index accordingly
            player_slot = player['player_slot']
            if player_slot < 128:
                hero_id += NUM_HEROES

            x[hero_id] = 1.0
from pybrain.supervised.trainers import BackpropTrainer

import numpy as np


def fresadora(t):
    """Step response of the milling-machine model:
    f(t) = 2/5 + (1/10)*e^(-5t) - (1/2)*e^(-t).

    Accepts a scalar or a numpy array.

    BUG FIX: the coefficients are written as float literals because under
    Python 2 the integer divisions 2/5, 1/10 and 1/2 all truncate to 0,
    which made the function return 0 everywhere; the float form is
    identical under Python 3.
    """
    return 0.4 + 0.1 * np.e ** (-5 * t) - 0.5 * np.e ** (-t)


# Training set: one input (time t) mapped to one output f(t).
ds = SupervisedDataSet(1, 1)

# Sample the response on [0, 10) with a step of 0.25.
entradasX = np.arange(0, 10, 0.25)
targetsY = fresadora(entradasX)

for k in range(len(entradasX)):
    ds.addSample(entradasX[k], targetsY[k])

#buildNetwork(number of input neurons, hidden-layer neurons, output layer, bias=True)

#PARAMETERS
neuronsIn = 3  # neurons in the input layer
neuronsHL = 4  # neurons in the hidden layer
neuronsOut = 1  # neurons in the output layer
epochs = 200  # training epochs

# NOTE(review): the network is built with 3 inputs while the dataset has
# only 1, and the neuronsIn/neuronsHL/neuronsOut constants above are never
# used -- they were presumably meant to be passed here; confirm intent.
# network
nn = buildNetwork(3, 3, 1, bias=True)

#trainer
trainer = BackpropTrainer(nn, ds)
Ejemplo n.º 54
0
>>> for i in range(len(results)):
>>>     print data_set['input'][i][0], 'xor', data_set['input'][i][1], '=', int(results[i] > 0.5)
0.0 xor 0.0 = 0
0.0 xor 1.0 = 1
1.0 xor 0.0 = 1
1.0 xor 1.0 = 0
"""
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.structure import SigmoidLayer

network = buildNetwork(2, 5, 1, hiddenclass=SigmoidLayer)

data_set = SupervisedDataSet(2, 1)
data_set.addSample((0, 0), [0])
data_set.addSample((0, 1), [1])
data_set.addSample((1, 0), [1])
data_set.addSample((1, 1), [0])

trainer = BackpropTrainer(module=network,
                          dataset=data_set,
                          momentum=0.00,
                          learningrate=0.10,
                          weightdecay=0.0,
                          lrdecay=1.0)

error = 1
epochsToTrain = 0
while error > 0.0001:
    epochsToTrain += 1
def end_function(board, lose):
    """Reinforce the network with the terminal position and persist the net.

    Two samples are added: the position as seen by the active player, and
    the same position with the side-to-move feature (index 96) overwritten.
    """
    global net

    ds = SupervisedDataSet(97, 1)

    # Target for the position from the active player's perspective:
    # black positions train towards 0, white positions towards 1.
    if board.active == BLACK:
        primary = 0
    elif board.active == WHITE:
        primary = 1
    else:
        primary = None

    if primary is not None:
        ds.addSample(board_to_input(board), primary)
        # Mirror sample with the side-to-move feature overwritten; on a
        # loss the mirrored position gets the opposite target.
        mirrored = board_to_input(board)
        mirrored[96] = primary
        ds.addSample(mirrored, (1 - primary) if lose else primary)

    trainer = BackpropTrainer(net, ds)
    trainer.train()

    NetworkWriter.writeToFile(net, 'CheckersMini/synapsemon_random_black_mini_140.xml')
    NetworkWriter.writeToFile(net, 'SynapsemonPie/synapsemon_random_black_mini_140_copy.xml')
Ejemplo n.º 56
0
    low = float(row[5])
    nextClose = float(row[6])
    #ds.addSample((low,high,opens,close),(nextClose),)
    close_n_value = ((close - close_min_Value) /
                     (close_max_Value - close_min_Value) - 0.5) * 2
    volume_n_value = ((volume - volume_min_Value) /
                      (volume_max_Value - volume_min_Value) - 0.5) * 2
    opens_n_value = ((opens - opens_min_Value) /
                     (opens_max_Value - opens_min_Value) - 0.5) * 2
    high_n_value = ((high - high_min_Value) /
                    (high_max_Value - high_min_Value) - 0.5) * 2
    low_n_value = ((low - low_min_Value) /
                   (low_max_Value - low_min_Value) - 0.5) * 2
    nextClose_n_value = ((close - close_min_Value) /
                         (close_max_Value - close_min_Value) - 0.5) * 2
    ds.addSample((low_n_value, high_n_value, opens_n_value, close_n_value),
                 (nextClose_n_value, ))

print("Dataset Built")
#print(ds)
#for inpt, target in ds:
#    print (inpt, target)

#normalize data

trainer = BackpropTrainer(n, ds)

#for i in range(100):
#    s = trainer.train()
#trainer.trainUntilConvergence()

trainer.trainUntilConvergence(verbose=True,
Ejemplo n.º 57
0
        # Calculate updated Q-Values
        costVector = list(qValues)
        if rewardTransition < 0:
            costVector[targetDirDiscrete] = rewardTransition
            rw = rewardTransition
        else:
            costVector[targetDirDiscrete] = ALPHA_FACTOR * costVector[
                targetDirDiscrete] + (1.0 - ALPHA_FACTOR) * rewardTransition
            rw = (1.0 - ALPHA_FACTOR) * rewardTransition

        # Learn the updated Q-value.
        if isLearning:
            ds.clear()
            ds.addSample(
                (stepStartingPos[0] / XSIZE, stepStartingPos[1] / YSIZE,
                 math.sin(oldDir * 0.25 * math.pi),
                 math.cos(oldDir * 0.25 * math.pi)), rw)
            trainer.setData(ds)
            trainer.trainEpochs(1)

    # ====================================
    # Final paint step
    # ====================================
    if pygame.display.get_active():
        clock.tick(2)
        screen.blit(screenBuffer, (0, 0))
        pygame.display.flip()
    print "Trainer Alpha Value: ", trainer.descent.alpha

    # Let's look at the events. Key presses from 0 to 8 are possible, as well as space for switching between Q values and best direction painting. ESCape ends the program.
    for event in pygame.event.get():
Ejemplo n.º 58
0
# Flatten each image into a single feature vector: (n, h, w) -> (n, h*w).
X = dataset.reshape((dataset.shape[0], dataset.shape[1] * dataset.shape[2]))

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = \
 train_test_split(X, y, train_size=0.9)

plt.savefig("foto2.png")

# From here on it did not work (original author's note).

from pybrain.datasets import SupervisedDataSet

# BUG FIX: 'testing = SupervisedDataSet(...)' and the buildNetwork import
# were accidentally indented inside the preceding for-loops, so they ran
# once per sample (recreating the testing set on every training iteration,
# and raising NameError if a split were empty). They are module-level
# statements and now execute exactly once.
training = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_train.shape[0]):
    training.addSample(X_train[i], y_train[i])

testing = SupervisedDataSet(X.shape[1], y.shape[1])
for i in range(X_test.shape[0]):
    testing.addSample(X_test[i], y_test[i])

from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(X.shape[1], 100, y.shape[1], bias=True)

from pybrain.supervised.trainers import BackpropTrainer
trainer = BackpropTrainer(net, training, learningrate=0.01, weightdecay=0.01)

# 20 epochs of backprop, then evaluate on the held-out 10%.
trainer.trainEpochs(epochs=20)
predictions = trainer.testOnClassData(dataset=testing)

from sklearn.metrics import f1_score
Ejemplo n.º 59
0
        # Counter initialised here but never read in this fragment
        # (NOTE(review): looks like leftover from an earlier revision).
        num = 0

        # Label each field row: isTrue = 1 when the chosen move string
        # points toward that row's position, else 0.
        for playerData in fieldData:
            isTrue = 0
            # decisionRaw is matched by suffix: 'left' / 'ight' (right) /
            # 'down' use the last 4 chars, 'up' the last 2.
            # Precedence note: each `and` pair binds before the `or`s, so
            # this is a disjunction of four (position, direction) checks.
            if player[0] < playerData[1] and decisionRaw[
                    -4:] == 'left' or player[0] > playerData[
                        1] and decisionRaw[-4:] == 'ight' or player[
                            1] < playerData[2] and decisionRaw[
                                -4:] == 'down' or player[1] > playerData[
                                    2] and decisionRaw[-2:] == 'up':
                isTrue = 1
            features.append([playerData, isTrue])

    # Supervised dataset: input is the raw playerData row (width
    # InputLayerSize -- assumed to match len(playerData); confirm with
    # the caller), target is the 0/1 direction label computed above.
    dataSet = SupervisedDataSet(InputLayerSize, 1)
    for feature in features:
        dataSet.addSample(feature[0], feature[1])

    # Two sigmoid hidden layers, each twice the input width; sigmoid
    # output since the target is a 0/1 label.
    Network = buildNetwork(dataSet.indim,
                           InputLayerSize * 2,
                           InputLayerSize * 2,
                           1,
                           hiddenclass=SigmoidLayer,
                           outclass=SigmoidLayer,
                           bias=True)
    trainer = BackpropTrainer(Network,
                              dataSet,
                              learningrate=0.001,
                              momentum=0.001)

    # Train until validation error stops improving (PyBrain default split).
    trainer.trainUntilConvergence()
Ejemplo n.º 60
0
    def getFitness(self, smMatrix):
        """Train a small feed-forward net on the sensorimotor history and
        score it by how many hidden units carry strong weights.

        smMatrix: sequence of per-step rows; columns 0-2 of row i are used
        as inputs and column 3 of row i+1 as the regression target
        (assumption from the indexing below -- confirm with the caller).

        Returns an int fitness: the number of hidden neurons whose summed
        absolute input weights exceed 2.0 AND whose absolute output weight
        exceeds 0.5.
        """
        # Build a 3-10-1 network explicitly (rather than buildNetwork) so
        # we keep handles on both connections; their weights are inspected
        # after training.
        net = FeedForwardNetwork()
        inp = LinearLayer(3)
        hidden = SigmoidLayer(10)
        outp = LinearLayer(1)
        net.addOutputModule(outp)
        net.addInputModule(inp)
        net.addModule(hidden)
        inToHidden = FullConnection(inp, hidden)
        hiddenToOut = FullConnection(hidden, outp)
        net.addConnection(inToHidden)
        net.addConnection(hiddenToOut)
        net.sortModules()

        # Dataset: inputs are columns 0-2 of row i, target is column 3 of
        # the following row.  Indices 1 .. len-2 match the original
        # enumerate() guard (first and last rows are skipped).
        ds = SupervisedDataSet(3, 1)
        for i in range(1, len(smMatrix) - 1):
            ds.addSample(
                (smMatrix[i][0], smMatrix[i][1], smMatrix[i][2]),
                (smMatrix[i + 1][3],))
        trainer = BackpropTrainer(net, ds, weightdecay=0.01)
        trainer.trainUntilConvergence(maxEpochs=50)

        # Sum of absolute input weights per hidden neuron; each hidden
        # neuron owns 3 consecutive weights in the flat params array.
        inWeightSums = [sum(abs(w) for w in group)
                        for group in zip(*[iter(inToHidden.params)] * 3)]
        # Absolute output weight per hidden neuron.
        outWeightMags = [abs(w) for w in hiddenToOut.params]

        # A hidden neuron counts toward fitness when its weights are strong
        # on BOTH sides of the network.  (The original comment said "below
        # a threshold", but the code has always selected values *above*
        # 2.0 / 0.5 -- comment fixed, behavior unchanged.)
        strongIn = set(i for i, v in enumerate(inWeightSums) if v > 2.0)
        strongOut = set(i for i, v in enumerate(outWeightMags) if v > 0.5)
        fit = len(strongIn & strongOut)
        print(fit)
        return fit