Esempio n. 1
0
def main():
    """Train a frame-level happiness/sadness classifier on MFCC features.

    Loads 200 happiness and 200 sadness wav files, extracts MFCC
    coefficient vectors, truncates every sample to the frame count of the
    shortest audio, then either reloads a previously trained PyBrain
    network from disk or trains a new one frame-by-frame.
    """
    print "Calculating mfcc...."
    # Maps sample id ("1".."400") -> (mfcc matrix, number of frames).
    mfcc_coeff_vectors_dict = {}
    # Samples 1-200: the happiness recordings.
    for i in range(1, 201):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    # Samples 201-400: the sadness recordings (files 1-200 on disk).
    for i in range(201, 401):
        extractor = FeatureExtractor(
            '/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    # Normalise every sample to the frame count of the shortest audio.
    audio_with_min_frames, min_frames = get_min_frames_audio(
        mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(
        mfcc_coeff_vectors_dict, min_frames)
    # frames = min_frames
    # print frames
    # print len(processed_mfcc_coeff['1'])
    # for each_vector in processed_mfcc_coeff['1']:
    #     print len(each_vector)
    print "mffcc found..."
    classes = ["happiness", "sadness"]

    # One 26-dimensional MFCC frame per dataset row; binary class target.
    training_data = ClassificationDataSet(
        26, target=1, nb_classes=2, class_labels=classes)
    # training_data = SupervisedDataSet(13, 1)
    try:
        # Reuse a previously trained network if its state file exists.
        # NOTE(review): reads "..._no_pp1.xml" but the save below writes
        # "..._no_pp.xml" -- confirm the filename mismatch is intentional.
        network = NetworkReader.readFrom(
            'network_state_frame_level_new2_no_pp1.xml')
    except:
        # NOTE(review): bare except swallows every error (not just a
        # missing file) and silently falls through to retraining.
        # Train from scratch: frames of samples 1-50 are labelled 1 (happy).
        for i in range(1, 51):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [1])

        # Frames of samples 201-250 are labelled 0 (sad).
        for i in range(201, 251):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            for each_vector in mfcc_coeff_vectors:
                training_data.appendLinked(each_vector, [0])

        # Expand the scalar class target into a one-hot encoding.
        training_data._convertToOneOfMany()
        print "prepared training data.."
        print training_data.indim, training_data.outdim
        # Single hidden layer of 5 units; fast=True uses the arac backend.
        network = buildNetwork(
            training_data.indim, 5, training_data.outdim, fast=True)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(
            network, "network_state_frame_level_new2_no_pp.xml")
def main():
    """Train an utterance-level happiness/sadness classifier on MFCC features.

    Unlike the frame-level variant, each whole utterance (flattened
    frames x 26 coefficients) is one dataset sample. Samples 1-150 /
    201-350 train the network; samples 151-200 are used for a happiness
    detection demo at the end.

    NOTE(review): this redefines main() declared earlier in the file, so
    only this version is reachable at runtime.
    """
    print "Calculating mfcc...."
    # Maps sample id ("1".."400") -> (mfcc matrix, number of frames).
    mfcc_coeff_vectors_dict = {}
    # Samples 1-200: happiness recordings.
    for i in range(1, 201):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Happiness/HappinessAudios/' + str(i) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    # Samples 201-400: sadness recordings (files 1-200 on disk).
    for i in range(201, 401):
        extractor = FeatureExtractor('/home/venkatesh/Venki/FINAL_SEM/Project/Datasets/Sadness/SadnessAudios/' + str(i - 200) + '.wav')
        mfcc_coeff_vectors = extractor.calculate_mfcc()
        mfcc_coeff_vectors_dict.update({str(i): (mfcc_coeff_vectors, mfcc_coeff_vectors.shape[0])})

    # Truncate all samples to the frame count of the shortest audio.
    audio_with_min_frames, min_frames = get_min_frames_audio(mfcc_coeff_vectors_dict)
    processed_mfcc_coeff = preprocess_input_vectors(mfcc_coeff_vectors_dict, min_frames)
    frames = min_frames
    print "mfcc found...."
    classes = ["happiness", "sadness"]
    try:
        # Reuse a previously trained network if its state file exists.
        network = NetworkReader.readFrom('network_state_new_.xml')
    except:
        # NOTE(review): bare except swallows every error, not just a
        # missing state file.
        # Create new network and start Training
        # One flattened utterance (frames x 26 coefficients) per sample.
        training_data = ClassificationDataSet(frames * 26, target=1, nb_classes=2, class_labels=classes)
        # training_data = SupervisedDataSet(frames * 39, 1)
        # Samples 1-150 labelled 1 (happy); 151-200 held out for the demo.
        for i in range(1, 151):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [1])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [1])

        # Samples 201-350 labelled 0 (sad).
        for i in range(201, 351):
            mfcc_coeff_vectors = processed_mfcc_coeff[str(i)]
            training_data.appendLinked(mfcc_coeff_vectors.ravel(), [0])
            # training_data.addSample(mfcc_coeff_vectors.ravel(), [0])

        # Expand the scalar class target into a one-hot encoding.
        training_data._convertToOneOfMany()
        network = buildNetwork(training_data.indim, 5, training_data.outdim)
        trainer = BackpropTrainer(network, learningrate=0.01, momentum=0.99)
        print "Before training...", trainer.testOnData(training_data)
        trainer.trainOnDataset(training_data, 1000)
        print "After training...", trainer.testOnData(training_data)
        NetworkWriter.writeToFile(network, "network_state_new_.xml")

    # Demo: classify the held-out happiness samples 151-200.
    print "*" * 30 , "Happiness Detection", "*" * 30
    for i in range(151, 201):
        output = network.activate(processed_mfcc_coeff[str(i)].ravel())
        # print output,
        # if output > 0.7:
        #     print "happiness"
        # Index of the strongest output unit picks the class label.
        class_index = max(xrange(len(output)), key=output.__getitem__)
        class_name = classes[class_index]
        print class_name
Esempio n. 3
0
    def train(network_file, input_length, output_length, training_data_file,
              learning_rate, momentum, stop_on_convergence, epochs, classify):
        n = get_network(network_file)
        if classify:
            ds = ClassificationDataSet(int(input_length),
                                       int(output_length) * 2)
            ds._convertToOneOfMany()
        else:
            ds = SupervisedDataSet(int(input_length), int(output_length))
        training_data = get_training_data(training_data_file)

        NetworkManager.last_training_set_length = 0
        for line in training_data:
            data = [float(x) for x in line.strip().split(',') if x != '']
            input_data = tuple(data[:(int(input_length))])
            output_data = tuple(data[(int(input_length)):])
            ds.addSample(input_data, output_data)
            NetworkManager.last_training_set_length += 1

        t = BackpropTrainer(n,
                            learningrate=learning_rate,
                            momentum=momentum,
                            verbose=True)
        print "training network " + network_storage_path + network_file

        if stop_on_convergence:
            t.trainUntilConvergence(ds, epochs)
        else:
            if classify:
                t.trainOnDataset(ds['class'], epochs)
            else:
                t.trainOnDataset(ds, epochs)

        error = t.testOnData()
        print "training done"
        if not math.isnan(error):
            save_network(n, network_file)
        else:
            print "error occured, network not saved"

        print "network saved"

        return error
Esempio n. 4
0
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate, para_momentum
):  # netStructure is a list [in, hidden, out], momentum is a parameter in SGD
    """Train a sigmoid feed-forward network on the training set.

    Builds a SupervisedDataSet from trainFeature/trainLabel, constructs a
    four-layer sigmoid network from netStructure, and trains with
    backprop until successive dataset errors differ by at most 1e-4.
    Returns the list of recorded errors.
    """
    num_samples = trainFeature.shape[0]
    num_features = trainFeature.shape[1]
    ds = SupervisedDataSet(num_features, 1)
    for row in range(num_samples):
        print(row)
        ds.addSample(list(trainFeature[row]), [trainLabel[row]])
    net = buildNetwork(
        netStructure[0],
        netStructure[1],
        netStructure[2],
        netStructure[3],
        hiddenclass=SigmoidLayer,
        outclass=SigmoidLayer,
    )
    trainer = BackpropTrainer(net, ds, learningrate=para_rate, momentum=para_momentum, verbose=True)
    # print(ds['input'])
    errorList = [trainer.testOnData(ds)]
    trainer.trainOnDataset(ds)
    errorList.append(trainer.testOnData(ds))
    trainer.trainOnDataset(ds)
    # Keep training until the error stops moving by more than 1e-4.
    while abs(trainer.testOnData(ds) - errorList[-1]) > 0.0001:
        trainer.trainOnDataset(ds)
        errorList.append(trainer.testOnData(ds))
    # Dump predictions for the whole training set, then for a fixed probe.
    print(np.array([net.activate(sample) for sample in trainFeature]))
    # print(testLabel)
    print(net.activate([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
    return errorList
Esempio n. 5
0
def ANN(
    trainFeature, trainLabel, testFeature, testLabel, netStructure, para_rate,
    para_momentum
):  #netStructure is a list [in, hidden, out], momentum is a parameter in SGD
    """Build and train a four-layer sigmoid network with backprop.

    Training stops once two successive dataset errors differ by no more
    than 1e-4; the list of recorded errors is returned.
    """
    sample_count = trainFeature.shape[0]
    feature_count = trainFeature.shape[1]
    train_set = SupervisedDataSet(feature_count, 1)
    for idx in range(sample_count):
        print(idx)
        train_set.addSample(list(trainFeature[idx]), [trainLabel[idx]])
    model = buildNetwork(netStructure[0], netStructure[1], netStructure[2],
                         netStructure[3], hiddenclass=SigmoidLayer,
                         outclass=SigmoidLayer)
    teacher = BackpropTrainer(model, train_set, learningrate=para_rate,
                              momentum=para_momentum, verbose=True)
    #print(train_set['input'])
    errorList = []
    errorList.append(teacher.testOnData(train_set))
    teacher.trainOnDataset(train_set)
    errorList.append(teacher.testOnData(train_set))
    teacher.trainOnDataset(train_set)
    # Continue until the error change falls below the 1e-4 threshold.
    while abs(teacher.testOnData(train_set) - errorList[-1]) > 0.0001:
        teacher.trainOnDataset(train_set)
        errorList.append(teacher.testOnData(train_set))
    # Show predictions for the training set and for one fixed probe input.
    print(np.array([model.activate(x) for x in trainFeature]))
    #print(testLabel)
    print(
        model.activate([
            0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0
        ]))
    return errorList
Esempio n. 6
0
'''
Created on Nov 21, 2011

@author: reza
'''
from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

if __name__ == '__main__':
    dataset = SupervisedDataSet(2, 1)
    dataset.addSample([0, 0], [0])
    dataset.addSample([0, 1], [1])
    dataset.addSample([1, 0], [1])
    dataset.addSample([1, 1], [0])
    
    network = buildNetwork(2, 4, 1)
    trainer = BackpropTrainer(network, learningrate = 0.01, momentum = 0.2,
                              verbose = False)
    
    print 'MSE before', trainer.testOnData(dataset)
    trainer.trainOnDataset(dataset, 1000)
    print 'MSE after', trainer.testOnData(dataset)
    
    z = network.activate([0, 0])
    print z
    
    print 'Final Weights: ', network.params
    
    
Esempio n. 7
0
    DS = SupervisedDataSet(5, 1)

    #adding datasets to the network
    for i in range(0, len(normal_array[0])):
        #       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i],normal_array[6][i],normal_array[7][i]])
        DS.addSample([
            normal_array[0][i], normal_array[1][i], normal_array[2][i],
            normal_array[3][i], normal_array[4][i]
        ], [normal_array[5][i]])

#    NN = buildNetwork(5,4,3,bias =True,hiddenclass=TanhLayer)
    NN = buildNetwork(DS.indim, 5, DS.outdim, bias=True, hiddenclass=TanhLayer)
    TRAINER = BackpropTrainer(NN, dataset=DS, learningrate=0.01, momentum=0.99)

    print 'MSE before', TRAINER.testOnData(DS)
    TRAINER.trainOnDataset(DS, 500)
    print 'MSE after', TRAINER.testOnData(DS)

    # testing
    #clearing arrays
    normal_array = [[], [], [], [], [], [], [], []]
    normalized_input = [[], [], [], [], []]
    max_array = [[], [], [], [], [], [], [], []]
    min_array = [[], [], [], [], [], [], [], []]
    range_array = [[], [], [], [], [], [], [], []]
    x_axis = []
    pred_arr = []
    act_arr = []
    training_normalization(args.f2, args.min, args.max)
    for i in range(len(normal_array[0])):
        prediction = NN.activate([
Esempio n. 8
0
   
    training_normalization(args.f1,args.min,args.max)
    #initialize dataset for neural network with 5 input + bias and 3 target 
    DS = SupervisedDataSet(5,1)

    #adding datasets to the network
    for i in range (0,len(normal_array[0])):
#       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i],normal_array[6][i],normal_array[7][i]])
       DS.addSample([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]],[normal_array[5][i]])

#    NN = buildNetwork(5,4,3,bias =True,hiddenclass=TanhLayer)
    NN = buildNetwork(DS.indim,5,DS.outdim,bias = True,hiddenclass=TanhLayer)
    TRAINER = BackpropTrainer(NN,dataset=DS,learningrate = 0.01,momentum = 0.99)

    print 'MSE before',TRAINER.testOnData(DS)
    TRAINER.trainOnDataset(DS,500)
    print 'MSE after',TRAINER.testOnData(DS)

# testing 
#clearing arrays
    normal_array           = [[],[],[],[],[],[],[],[]]
    normalized_input  = [[],[],[],[],[]]
    max_array                 = [[],[],[],[],[],[],[],[]]
    min_array                  = [[],[],[],[],[],[],[],[]]   
    range_array              = [[],[],[],[],[],[],[],[]]
    x_axis                          = []
    pred_arr           = []
    act_arr                  = []
    training_normalization(args.f2,args.min,args.max)
    for i in range (len(normal_array[0])):
        prediction= NN.activate([normal_array[0][i],normal_array[1][i],normal_array[2][i],normal_array[3][i],normal_array[4][i]])
Esempio n. 9
0
class NeuralNetwork:
    """Wrapper around a pybrain feed-forward network that learns the XOR
    or NXOR truth table and records its RMSE and training time.
    """

    def __init__(self, num_inputs, num_outputs, num_hidden_layers):
        ### Neural Network Variables ###
        self.num_inputs = num_inputs                # Number of inputs
        self.num_outputs = num_outputs              # Number of outputs
        self.num_hidden_layers = num_hidden_layers  # Number of hidden layers
        self.dataset = None                         # Dataset object
        self.network = None                         # Network object
        self.backprop = None                        # Backpropogation object
        self.epochs = 0                             # Epochs
        self.RMSE = 0                               # RMSE
        self.training_time = 0                      # Calculated training time
        logger.info("New neural network object created.")

    def run(self, operator, epochs):
        """Build dataset and network, load the truth table for *operator*
        ("XOR" or "NXOR"), then test, train for *epochs*, and re-test."""
        logger.info("Starting up neural network for %s.", operator)
        self.epochs = epochs
        self.buildDataset()
        self.buildNetwork()

        # XOR data
        # FIX: the original used `operator is "XOR"`, which compares object
        # identity and only works by accident of string interning; `==`
        # compares the value, which is what is intended.
        if operator == "XOR":
            self.addData([0, 0], [0])
            self.addData([0, 1], [1])
            self.addData([1, 0], [1])
            self.addData([1, 1], [0])

        # NXOR data
        if operator == "NXOR":
            self.addData([0, 0], [1])
            self.addData([0, 1], [0])
            self.addData([1, 0], [0])
            self.addData([1, 1], [1])

        self.backProp()
        self.testData()
        self.trainData()
        self.testData()
        return

    def buildDataset(self):
        """Create the SupervisedDataSet sized to this network's I/O."""
        self.dataset = SupervisedDataSet(self.num_inputs, self.num_outputs)
        logger.info("Dataset object built.")
        return True

    def addData(self, data_in, data_out):
        """Append one (input, target) sample to the dataset."""
        self.dataset.addSample(data_in, data_out)
        logger.info("Data appended.")
        return True

    def buildNetwork(self):
        """Create the feed-forward network.

        NOTE(review): num_hidden_layers is used here as the hidden *unit*
        count of a single hidden layer -- confirm the intended meaning.
        """
        self.network = buildNetwork(self.num_inputs, self.num_hidden_layers, self.num_outputs)
        logger.info("Network created.")
        return True

    def backProp(self):
        """Create the backprop trainer for the current network."""
        self.backprop = BackpropTrainer(self.network, learningrate=0.01, momentum=0.99)
        logger.info("Back propogation trainer created.")
        return True

    def testData(self):
        """Evaluate the network; RMSE is the square root of the MSE."""
        self.RMSE = math.sqrt(self.backprop.testOnData(self.dataset))
        logger.info('RMSE: %s', str(self.RMSE))
        return True

    def trainData(self):
        """Train for self.epochs epochs and record the wall-clock time."""
        start = timeit.default_timer()
        self.backprop.trainOnDataset(self.dataset, self.epochs)
        stop = timeit.default_timer()
        self.training_time = stop - start
        logger.info("Training time: %s", str(self.training_time))
        return True
Esempio n. 10
0
class MonteCarlo(object):
    """Move selector that runs playout simulations and trains a small
    sigmoid network to predict a (wins, plays) pair per move, in place of
    the explicit per-node visit statistics of classic MCTS.
    """

    def __init__(self, **kwargs):

        # Deepest simulation depth reached and per-move bookkeeping.
        self.max_depth = 0
        self.stats = {}

        # Time budget (seconds) per move and cap on playout length.
        self.calculation_time = float(kwargs.get('time', 1))
        self.max_moves = int(kwargs.get('max_moves', Board.BOARD_SIZE_SQ))

        # Exploration constant, increase for more exploratory moves,
        # decrease to prefer moves with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))

        # Input layout: black plane + white plane + changed-cells plane
        # + two whose-turn flags (see get_input_values).
        self.features_num = Board.BOARD_SIZE_SQ * 3 + 2
        self.hidden_neurons_num = self.features_num * 2
        self.net = buildNetwork(self.features_num,
                                self.hidden_neurons_num,
                                2,
                                bias=True,
                                outclass=SigmoidLayer)
        self.trainer = BackpropTrainer(self.net)

        # Completed simulation count and (who, before, after) observations.
        self.total_sim = 0
        self.observation = []

    def select(self, board, moves, who, **kwargs):
        """Pick a move for *who* out of *moves*, simulating games until
        the time budget (or the hard 11-game cap) is exhausted."""
        # Bail out early if there is no real choice to be made.
        if not moves:
            return
        if len(moves) == 1:
            return moves[0]

        # Larger budget while training, one second during normal play.
        if Game.on_training:
            self.calculation_time = 60
        else:
            self.calculation_time = 1

        self.max_depth = 0
        self.stats = {}
        games = 0
        begin = time.time()
        while time.time() - begin < self.calculation_time:
            self.sim(board)
            games += 1
            # Hard cap: never more than 11 simulations per move.
            if games > 10:
                break

        self.stats.update(games=games,
                          max_depth=self.max_depth,
                          time=str(time.time() - begin))
        print(self.stats['games'], self.stats['time'])

        move, _ = self.get_best(board, moves, who)
        return move

    def sim(self, board):
        """Play one simulated game from *board* and retrain the network
        on the states visited along the way."""
        visited_path = []
        state = board
        winner = Board.STONE_EMPTY
        for _ in range(1, self.max_moves + 1):
            moves, player = Game.possible_moves(state)
            state_new, state_new_val = self.get_best(state, moves, player)
            visited_path.append((player, state, state_new, state_new_val))
            over, winner, _ = state_new.is_over(state)
            if over:
                break
            state = state_new

        self.total_sim += 1

        # Rescale the net's (wins, plays) outputs into counts, credit one
        # visit (and a win for the winner's moves), then train on the pairs.
        # NOTE(review): assumes the outputs are normalised by total_sim --
        # confirm against how the targets are consumed elsewhere.
        ds = SupervisedDataSet(self.features_num, 2)
        for player, state, new, val in visited_path:
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if player == winner:
                wins += 1
            ds.addSample(self.get_input_values(state, new, player),
                         (wins, plays))
        self.trainer.trainOnDataset(ds)

    def get_best(self, state, moves, who):
        """Return (best_move, net_output) using a UCB-style score built
        from the predicted win and play estimates."""
        outputs = []
        for s in moves:
            out = self.net.activate(self.get_input_values(state, s, who))
            outputs.append(out)
        a = np.array(outputs)
        # Exploitation term (wins/plays) plus exploration bonus scaled by C.
        b = a[:, 0] / a[:, 1] + self.C * np.log(np.sum(a[:, 1])) / a[:, 1]
        i = np.argmax(b)
        return moves[i], a[i]

    def get_input_values(self, board, new_board, who):
        """Encode a (board, candidate successor, player) triple as the
        network's input vector: three 0/1 planes plus two turn flags."""
        v = board.stones
        sz = v.shape[0]
        iv = np.zeros(self.features_num)
        iv[0:sz] = (v == Board.STONE_BLACK).astype(int)
        iv[sz:sz * 2] = (v == Board.STONE_WHITE).astype(int)
        iv[sz * 2:sz * 3] = (new_board.stones != v).astype(int)
        iv[-2] = 1 if who == Board.STONE_BLACK else 0  # turn to black move
        iv[-1] = 1 if who == Board.STONE_WHITE else 0  # turn to white move
        return iv

    def swallow(self, who, st0, st1, **kwargs):
        """Record one observed transition (player, state before, after)."""
        self.observation.append((who, st0, st1))

    def absorb(self, winner, **kwargs):
        """Learn from the recorded observations of a finished game
        (black's moves only), crediting *winner* with the result."""
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            if who != Board.STONE_BLACK:
                continue
            input_vec = self.get_input_values(s0, s1, who)
            val = self.net.activate(input_vec)
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if who == winner:
                wins += 1
            ds.addSample(input_vec, (wins, plays))
        self.trainer.trainOnDataset(ds)

    def void(self):
        """Drop all recorded observations."""
        self.observation = []
class NeuralNetwork(object):
    '''Neural network wrapper for the pybrain implementation
    '''
    def __init__(self):
        # Default directory where trained networks are persisted.
        self.path = os.path.dirname(
            os.path.abspath(__file__)) + "/../../../data/"
        self.net = None      # pybrain network, set by createNew()/load()
        self.trainer = None  # BackpropTrainer, set by the train methods

    def createNew(self, nInputs, nHiddenLayers, nOutput, bias):
        '''builds a new neural network
        
        :param int nInputs: the number of input nodes
        :param int nHiddenLayers: the number of hidden layers
        :param int nOutput: the number of output nodes
        :param bool bias: if True a bias node will be added
        
        :return: instance of a new neural network
        :rtype: NeuralNetwork
        '''
        self.net = buildNetwork(nInputs,
                                nHiddenLayers,
                                nOutput,
                                bias=bias,
                                hiddenclass=TanhLayer)
        return self

    def train(self,
              dataset,
              maxEpochs=10,
              learningrate=0.01,
              momentum=0.99,
              continueEpochs=10,
              validationProportion=0.25):
        '''trains a network with the given dataset for a fixed number of epochs
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of the backprop weight updates
        :param float momentum: helps out of local minima while training, to get better results
        '''
        self.trainer = BackpropTrainer(self.net,
                                       learningrate=learningrate,
                                       momentum=momentum)
        self.trainer.trainOnDataset(dataset, maxEpochs)

    def trainConvergence(self,
                         dataset,
                         maxEpochs=10,
                         learningrate=0.01,
                         momentum=0.99,
                         continueEpochs=10,
                         validationProportion=0.25):
        '''trains a network with the given dataset until it converges
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of the backprop weight updates
        :param float momentum: helps out of local minima while training, to get better results
        '''
        self.trainer = BackpropTrainer(self.net,
                                       learningrate=learningrate,
                                       momentum=momentum)
        self.trainer.trainUntilConvergence(dataset, maxEpochs, False,
                                           continueEpochs,
                                           validationProportion)

    def test(self, data=None, verbose=False):
        '''returns the trainer's error on *data*; train() must run first'''
        if not self.trainer:
            raise ValueError(
                "call train() first, to create a valid trainer object")
        return self.trainer.testOnData(data, verbose)

    def activate(self, value, rnd=False):
        '''feeds *value* through the net; optionally rounds to a 0/1 class'''
        inpt = self.net.activate(value)[0]
        if rnd:
            return self._clazz(inpt)
        return inpt

    def _clazz(self, inpt):
        # Clamp the rounded activation into the {0, 1} class labels.
        clazz = round(inpt)
        if clazz < 0:
            return 0
        if clazz > 1:
            return 1
        return int(clazz)

    def _createFilePath(self, filePath, defaultPath=True):
        # Resolve against the default data directory unless told otherwise.
        if defaultPath:
            return self.path + filePath + FILE_EXTENSION
        return filePath + FILE_EXTENSION

    def save(self, filePath, defaultPath=True):
        '''saves the neural network
        
        :param string filePath: filepath for the to be saved network
        '''
        # FIX: pickle writes bytes, so the file must be opened in binary
        # mode ('wb'); text mode breaks pickling on Python 3.
        with open(self._createFilePath(filePath, defaultPath), 'wb') as f:
            pickle.dump(self.net, f)

    def load(self, filePath, defaultPath=True):
        '''loades the neural network
        
        :param string filePath: filepath for the to be loaded network
        
        :return: instance of a saved neural network
        :rtype: NeuralNetwork
        '''
        # FIX: binary mode ('rb') is required to unpickle.
        with open(self._createFilePath(filePath, defaultPath), 'rb') as f:
            self.net = pickle.load(f)
            return self

    def __repr__(self):
        return "%s\n%s" % (self.__class__.__name__, str(self.net))
Esempio n. 12
0
class MonteCarlo(object):
    """Simulation-driven move selector backed by a sigmoid network that
    predicts a (wins, plays) pair per candidate move instead of keeping
    explicit MCTS visit counts.
    """

    def __init__(self, **kwargs):

        # Deepest simulation depth reached and per-move bookkeeping.
        self.max_depth = 0
        self.stats = {}

        # Time budget (seconds) per move and cap on playout length.
        self.calculation_time = float(kwargs.get('time', 1))
        self.max_moves = int(kwargs.get('max_moves', Board.BOARD_SIZE_SQ))

        # Exploration constant, increase for more exploratory moves,
        # decrease to prefer moves with known higher win rates.
        self.C = float(kwargs.get('C', 1.4))

        # Inputs: black plane + white plane + changed-cells plane + 2 turn flags.
        self.features_num = Board.BOARD_SIZE_SQ * 3 + 2
        self.hidden_neurons_num = self.features_num * 2
        self.net = buildNetwork(self.features_num, self.hidden_neurons_num, 2, bias=True, outclass=SigmoidLayer)
        self.trainer = BackpropTrainer(self.net)

        # Completed simulation count and (who, before, after) observations.
        self.total_sim = 0
        self.observation = []


    def select(self, board, moves, who, **kwargs):
        """Pick a move for *who* from *moves*, simulating until the time
        budget (or the hard 11-game cap) runs out."""
        # Bail out early if there is no real choice to be made.
        if not moves:
            return
        if len(moves) == 1:
            return moves[0]

        # Larger budget while training, one second during normal play.
        if Game.on_training:
            self.calculation_time = 60
        else:
            self.calculation_time = 1

        self.max_depth = 0
        self.stats = {}
        games = 0
        begin = time.time()
        while time.time() - begin < self.calculation_time:
            self.sim(board)
            games += 1
            # Hard cap: never more than 11 simulations per move.
            if games > 10:
                break

        self.stats.update(games=games, max_depth=self.max_depth, time=str(time.time() - begin))
        print(self.stats['games'], self.stats['time'])

        move, _ = self.get_best(board, moves, who)
        return move


    def sim(self, board):
        """Play one simulated game from *board* and retrain the network
        on the visited (state, move) pairs."""
        visited_path = []
        state = board
        winner = Board.STONE_EMPTY
        for _ in range(1, self.max_moves + 1):
            moves, player, _ = Game.possible_moves(state)
            state_new, state_new_val = self.get_best(state, moves, player)
            visited_path.append((player, state, state_new, state_new_val))
            over, winner, _ = state_new.is_over(state)
            if over:
                break
            state = state_new

        self.total_sim += 1

        # Rescale the net's (wins, plays) outputs into counts, credit this
        # simulation, and train on the updated pairs.
        # NOTE(review): presumably the outputs are normalised by total_sim;
        # confirm against how the targets are consumed.
        ds = SupervisedDataSet(self.features_num, 2)
        for player, state, new, val in visited_path:
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if player == winner:
                wins += 1
            ds.addSample(self.get_input_values(state, new, player), (wins, plays))
        self.trainer.trainOnDataset(ds)


    def get_best(self, state, moves, who):
        """Return (best_move, net_output), scoring each move with a
        UCB-style mix of predicted win rate and exploration bonus."""
        outputs = []
        for s in moves:
            out = self.net.activate(self.get_input_values(state, s, who))
            outputs.append(out)
        a = np.array(outputs)
        # Exploitation term (wins/plays) plus exploration bonus scaled by C.
        b = a[:, 0] / a[:, 1] + self.C * np.log(np.sum(a[:, 1])) / a[:, 1]
        i = np.argmax(b)
        return moves[i], a[i]


    def get_input_values(self, board, new_board, who):
        """Encode (board, candidate successor, player) as the network's
        input vector: three 0/1 planes plus two whose-turn flags."""
        v = board.stones
        sz = v.shape[0]
        iv = np.zeros(self.features_num)
        iv[0:sz] = (v == Board.STONE_BLACK).astype(int)
        iv[sz:sz * 2] = (v == Board.STONE_WHITE).astype(int)
        iv[sz * 2:sz * 3] = (new_board.stones != v).astype(int)
        iv[-2] = 1 if who == Board.STONE_BLACK else 0  # turn to black move
        iv[-1] = 1 if who == Board.STONE_WHITE else 0  # turn to white move
        return iv


    def swallow(self, who, st0, st1, **kwargs):
        """Record one observed transition (player, state before, after)."""
        self.observation.append((who, st0, st1))

    def absorb(self, winner, **kwargs):
        """Learn from the recorded observations of a finished game
        (black's moves only), crediting *winner* with the result."""
        self.total_sim += 1

        ds = SupervisedDataSet(self.features_num, 2)
        for who, s0, s1 in self.observation:
            if who != Board.STONE_BLACK:
                continue
            input_vec = self.get_input_values(s0, s1, who)
            val = self.net.activate(input_vec)
            plays = val[1] * self.total_sim + 1
            wins = val[0] * self.total_sim
            if who == winner:
                wins += 1
            ds.addSample(input_vec, (wins, plays))
        self.trainer.trainOnDataset(ds)

    def void(self):
        """Drop all recorded observations."""
        self.observation = []
Esempio n. 13
0
class NeuralNetwork(object):
    '''Neural network wrapper for the pybrain implementation
    '''

    def __init__(self):
        # Directory where trained networks are persisted.
        self.path = os.path.dirname(os.path.abspath(__file__)) + "/../../data/"
        self.net = None      # pybrain network, set by createNew()/load()
        self.trainer = None  # BackpropTrainer, set by the train methods

    def createNew(self, nInputs, nHiddenLayers, nOutput, bias):
        '''builds a new neural network
        
        :param int nInputs: the number of input nodes
        :param int nHiddenLayers: the number of hidden layers
        :param int nOutput: the number of output nodes
        :param bool bias: if True a bias node will be added
        
        :return: instance of a new neural network
        :rtype: NeuralNetwork
        '''
        self.net = buildNetwork(nInputs, nHiddenLayers, nOutput, bias=bias, hiddenclass=TanhLayer)
        return self

    def train(self, dataset, maxEpochs = 10, learningrate = 0.01, momentum = 0.99, continueEpochs=10, validationProportion=0.25):
        '''trains a network with the given dataset for a fixed number of epochs
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of the backprop weight updates
        :param float momentum: helps out of local minima while training, to get better results
        '''
        self.trainer = BackpropTrainer(self.net, learningrate = learningrate, momentum = momentum)
        self.trainer.trainOnDataset(dataset, maxEpochs)

    def trainConvergence(self, dataset, maxEpochs = 10, learningrate = 0.01, momentum = 0.99, continueEpochs=10, validationProportion=0.25):
        '''trains a network with the given dataset until it converges
        
        :param SupervisedDataSet dataset: the training dataset
        :param int maxEpochs: max number of iterations to train the network
        :param float learningrate: step size of the backprop weight updates
        :param float momentum: helps out of local minima while training, to get better results
        '''
        self.trainer = BackpropTrainer(self.net, learningrate = learningrate, momentum = momentum)
        self.trainer.trainUntilConvergence(dataset, maxEpochs, False, continueEpochs, validationProportion)

    def test(self, data=None, verbose=False):
        '''returns the trainer's error on *data*; train() must run first'''
        if not self.trainer:
            raise ValueError("call train() first, to create a valid trainer object") 
        return self.trainer.testOnData(data, verbose)

    def activate(self, value, rnd=False):
        '''feeds *value* through the net; optionally rounds to a 0/1 class'''
        inpt = self.net.activate(value)[0]
        if rnd:
            return self._clazz(inpt)
        return inpt

    def _clazz(self, inpt):
        # Clamp the rounded activation into the {0, 1} class labels.
        clazz = round(inpt)
        if clazz < 0:
            return 0
        if clazz > 1:
            return 1
        return int(clazz)

    def save(self, name):
        '''saves the neural network
        
        :param string name: filename for the to be saved network
        '''
        # FIX: pickle writes bytes, so the file must be opened in binary
        # mode ('wb'); the context manager also guarantees it is closed.
        with open(self.path + name + FILE_EXTENSION, 'wb') as f:
            pickle.dump(self.net, f)

    def load(self, name):
        '''loades the neural network
        
        :param string name: filename for the to be loaded network
        
        :return: instance of a saved neural network
        :rtype: NeuralNetwork
        '''
        # FIX: binary mode ('rb') is required to unpickle.
        with open(self.path + name + FILE_EXTENSION, 'rb') as f:
            self.net = pickle.load(f)
        return self

    def __repr__(self):
        return "%s\n%s" % (self.__class__.__name__, str(self.net))
Esempio n. 14
0
__author__ = 'Stubborn'


from pybrain.datasets.supervised import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers.backprop import BackpropTrainer

D = SupervisedDataSet(2, 1)
# 2 imput --> 1 output

D.addSample([0,0], [0])
D.addSample([0,1], [1])
D.addSample([1,0], [1])
D.addSample([1,1], [0])
# 4 kombinationer av input och dess output "OR" funktion

N = buildNetwork(2, 4, 1)
# multilayer perception? med 1 gomt lager

T = BackpropTrainer(N, learningrate = 0.01, momentum = 0.99)
# momentum = reduced learningrate

print (('MSE before'), T.testOnData(D))
T.trainOnDataset(D, 1000)
T.trainUntilConvergence()
print (('MSE after'), T.testOnData(D))
print D