Example #1
    def __init__(self, dimensions=0, k_neighbors=5, use_kernel=False):
        '''
        Constructor for FaceRecognizer.

        Args (optional):
            dimensions (int): How many principal components to keep.
            k_neighbors (int): How many neighbors to compare against in the
                kNN classifier.
            use_kernel (bool): Whether to use the kernel (RBF) variant of PCA.
        '''

        self.pca_model = PCAModel(dimensions=dimensions, use_kernel=use_kernel)
        self.knn_classifier = KNNClassifier(neighbors=k_neighbors)
        self.instances = None
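
A one-line construction sketch for the snippet above; PCAModel and KNNClassifier are assumed to be provided by the surrounding project, and the argument values are illustrative, not from the source:

# Construction sketch (illustrative values).
recognizer = FaceRecognizer(dimensions=16, k_neighbors=5, use_kernel=False)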
Example #2
def main():
    print("learnTicTacToeWithComparisonNet.py main()")

    authority = tictactoe.Authority()
    positionTensorShape = authority.PositionTensorShape()
    moveTensorShape = authority.MoveTensorShape()
    playerList = authority.PlayersList()

    if args.startWithNeuralNetwork is not None:
        raise NotImplementedError(
            "main(): Start with a neural network is not implemented...")
    else:
        if args.startWithAutoencoder is not None:
            autoencoderNet = autoencoder.position.Net()
            autoencoderNet.Load(args.startWithAutoencoder)

            decoderClassifier = ComparisonNet.BuildADecoderClassifierFromAnAutoencoder(
                autoencoderNet, dropoutRatio=0.25)
        else:
            raise NotImplementedError(
                "main(): Starting without an autoencoder is not implemented..."
            )

    # Create the optimizer
    logging.debug(decoderClassifier)
    for name, param in decoderClassifier.named_parameters():
        if 'decoding' in name:
            param.requires_grad = True
        else:
            param.requires_grad = False
        print("name = {}; param.requires_grad = {}".format(
            name, param.requires_grad))

    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        decoderClassifier.parameters()),
                                 lr=args.learningRate,
                                 betas=(0.5, 0.999))

    # Loss function: the network is a binary classifier over position pairs
    loss = torch.nn.CrossEntropyLoss()

    # Initial learning rate
    learningRate = args.learningRate

    # Output monitoring file
    epochLossFile = open(os.path.join(args.outputDirectory, 'epochLoss.csv'),
                         "w",
                         buffering=1)  # Flush the buffer at each line
    epochLossFile.write(
        "epoch,trainingLoss,validationLoss,validationAccuracy,averageReward,winRate,drawRate,lossRate\n"
    )

    # First game with a random player, before any training
    decoderClassifier.eval()
    (numberOfWinsForComparator, numberOfWinsForRandomPlayer,
     numberOfDraws) = Comparison.SimulateGamesAgainstARandomPlayer(
         decoderClassifier, authority, args.numberOfGamesAgainstARandomPlayer)
    print(
        "(numberOfWinsForComparator, numberOfWinsForRandomPlayer, numberOfDraws) = ({}, {}, {})"
        .format(numberOfWinsForComparator, numberOfWinsForRandomPlayer,
                numberOfDraws))

    winRate = numberOfWinsForComparator / (numberOfWinsForComparator +
                                           numberOfWinsForRandomPlayer +
                                           numberOfDraws)
    lossRate = numberOfWinsForRandomPlayer / (numberOfWinsForComparator +
                                              numberOfWinsForRandomPlayer +
                                              numberOfDraws)
    drawRate = numberOfDraws / (numberOfWinsForComparator +
                                numberOfWinsForRandomPlayer + numberOfDraws)
    logging.info(
        "Against a random player, winRate = {}; drawRate = {}; lossRate = {}".
        format(winRate, drawRate, lossRate))

    epochLossFile.write("0,-,-,-,{},{},{},{}\n".format(
        winRate - lossRate, winRate, drawRate, lossRate))

    latentRepresentationFile = open(os.path.join(args.outputDirectory,
                                                 'latentRepresentation.csv'),
                                    "w",
                                    buffering=1)

    #playerToEpsilonDict = {playerList[0]: args.epsilon, playerList[1]: args.epsilon}
    epsilon = args.epsilon

    for epoch in range(1, args.numberOfEpochs + 1):
        logging.info("Epoch {}".format(epoch))
        decoderClassifier.train()

        # Never true as written; changing '== -1' to '== 0' would enable
        # periodic halving of the learning rate.
        if epoch % 100 == -1:
            learningRate = learningRate / 2
            for param_group in optimizer.param_groups:
                param_group['lr'] = learningRate
            #epsilon = epsilon/2
        """if (epoch // 25) %2 == 0: # Optimize decoding
            for name, param in decoderClassifier.named_parameters():
                if 'decoding' in name:
                    param.requires_grad = True
                else:
                    param.requires_grad = False
            logging.info("Optimizing decoding layers")
        else: # Optimize encoding
            for name, param in decoderClassifier.named_parameters():
                if 'decoding' in name:
                    param.requires_grad = False
                else:
                    param.requires_grad = True
            logging.info("Optimizing encoding layers")

        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, decoderClassifier.parameters()),
                                     lr=learningRate,
                                     betas=(0.5, 0.999))
        """
        if epoch > 1 and epoch % 200 == 1:
            epsilon = epsilon / 2

        # Generate positions
        if epoch % recomputingPeriod == 1:
            minimumNumberOfMovesForInitialPositions = MinimumNumberOfMovesForInitialPositions(
                epoch)
            maximumNumberOfMovesForInitialPositions = args.maximumNumberOfMovesForInitialPositions
            logging.info("Generating positions...")
            startingPositionsList = Comparison.SimulateRandomGames(
                authority,
                minimumNumberOfMovesForInitialPositions,
                maximumNumberOfMovesForInitialPositions,
                args.numberOfPositionsForTraining,
                swapIfOddNumberOfMoves=True)

            numberOfMajorityX, numberOfMajorityO, numberOfEqualities = Majority(
                startingPositionsList)
            #print ("numberOfMajorityX = {}; numberOfMajorityO = {}; numberOfEqualities = {}".format(numberOfMajorityX, numberOfMajorityO, numberOfEqualities))
            #print ("main(): startingPositionsList = {}".format(startingPositionsList))

            startingPositionsTensor, augmentedStartingPositionsList = StartingPositionsInPairsOfPossibleOptions(
                startingPositionsList, authority)
            #print ("main(): augmentedStartingPositionsList = {}".format(augmentedStartingPositionsList))
            #print ("main(): startingPositionsTensor.shape = {}".format(startingPositionsTensor.shape))
            #print ("main(): startingPositionsTensor = {}".format(startingPositionsTensor))

            logging.info("Comparing starting position pairs...")
            decoderClassifier.eval()
            pairWinnerIndexList = Comparison.ComparePositionPairs(
                authority,
                decoderClassifier,
                augmentedStartingPositionsList,
                args.numberOfSimulations,
                epsilon=0,
                playerToEpsilonDict={
                    playerList[0]: epsilon,
                    playerList[1]: epsilon
                })
            #print ("pairWinnerIndexList = {}".format(pairWinnerIndexList))

            pairWinnerIndexTsr = PairWinnerIndexTensor(pairWinnerIndexList)

        decoderClassifier.train()
        # Since the samples are generated dynamically, there is no need for minibatches: all samples are always new
        optimizer.zero_grad()

        # Forward pass
        outputTensor = decoderClassifier(startingPositionsTensor)

        # Calculate the error and backpropagate
        trainingLoss = loss(outputTensor, pairWinnerIndexTsr)
        logging.info("trainingLoss.item() = {}".format(trainingLoss.item()))

        trainingLoss.backward()

        # Move in the gradient descent direction
        optimizer.step()

        gradient0AbsMean = decoderClassifier.Gradient0AbsMean()
        logging.debug("gradient0AbsMean = {}".format(gradient0AbsMean))

        # ******************  Validation ******************
        decoderClassifier.eval()

        if epoch % 200 == 1:
            logging.info("Generating validation positions...")
            validationStartingPositionsList = Comparison.SimulateRandomGames(
                authority,
                minimumNumberOfMovesForInitialPositions,
                maximumNumberOfMovesForInitialPositions,
                args.numberOfPositionsForValidation,
                swapIfOddNumberOfMoves=True)
            """for validationStartingPositionNdx in range(len(validationStartingPositionsList)):
                if numpy.random.random() >= 0.5:
                    swappedPosition = authority.SwapPositions(validationStartingPositionsList[validationStartingPositionNdx], playerList[0], playerList[1])
                    validationStartingPositionsList[validationStartingPositionNdx] = swappedPosition
            """
            # print ("main(): startingPositionsList = {}".format(startingPositionsList))

            validationStartingPositionsTensor, validationAugmentedStartingPositionsList = \
                StartingPositionsInPairsOfPossibleOptions(validationStartingPositionsList, authority)

            logging.info("Comparing validation starting position pairs...")
            validationPairWinnerIndexList = Comparison.ComparePositionPairs(
                authority,
                decoderClassifier,
                validationAugmentedStartingPositionsList,
                args.numberOfSimulations,
                epsilon=0,
                playerToEpsilonDict={
                    playerList[0]: epsilon,
                    playerList[1]: epsilon
                })  # Start with purely random games (epsilon = 1)
            validationPairWinnerIndexTsr = PairWinnerIndexTensor(
                validationPairWinnerIndexList)

        # Forward pass
        validationOutputTensor = decoderClassifier(
            validationStartingPositionsTensor)

        # Calculate the validation error
        validationLoss = loss(validationOutputTensor,
                              validationPairWinnerIndexTsr)

        # Calculate the validation accuracy
        validationAccuracy = Accuracy(validationOutputTensor,
                                      validationPairWinnerIndexTsr)

        logging.info(
            "validationLoss.item() = {};    validationAccuracy = {}".format(
                validationLoss.item(), validationAccuracy))

        # Check if latent representation of pairs are the same
        validationLatentRepresentationTsr = decoderClassifier.DecodingLatentRepresentation(
            decodingLayerNdx=1, inputTsr=validationStartingPositionsTensor)
        print(
            "validationStartingPositionsTensor.shape = {}; validationLatentRepresentationTsr.shape = {}"
            .format(validationStartingPositionsTensor.shape,
                    validationLatentRepresentationTsr.shape))
        for pairNdx in range(validationLatentRepresentationTsr.shape[0] // 2):
            pairDifference = torch.abs(
                validationLatentRepresentationTsr[2 * pairNdx] -
                validationLatentRepresentationTsr[2 * pairNdx + 1])
            if torch.max(pairDifference).item() < dead_neuron_zero_threshold:
                logging.warning(
                    "Pair {} and {} have an identical latent representation".format(
                        2 * pairNdx, 2 * pairNdx + 1))
                print("validationStartingPositionsTensor[2 * pairNdx] = {}".format(
                    validationStartingPositionsTensor[2 * pairNdx]))
                print("validationStartingPositionsTensor[2 * pairNdx + 1] = {}".format(
                    validationStartingPositionsTensor[2 * pairNdx + 1]))

        logging.info("Play against a random player...")
        # Play against a random player
        (numberOfWinsForEvaluator, numberOfWinsForRandomPlayer,
         numberOfDraws) = Comparison.SimulateGamesAgainstARandomPlayer(
             decoderClassifier, authority,
             args.numberOfGamesAgainstARandomPlayer)

        winRate = numberOfWinsForEvaluator / (numberOfWinsForEvaluator +
                                              numberOfWinsForRandomPlayer +
                                              numberOfDraws)
        lossRate = numberOfWinsForRandomPlayer / (numberOfWinsForEvaluator +
                                                  numberOfWinsForRandomPlayer +
                                                  numberOfDraws)
        drawRate = numberOfDraws / (numberOfWinsForEvaluator +
                                    numberOfWinsForRandomPlayer +
                                    numberOfDraws)
        logging.info(
            "Against a random player, winRate = {}; drawRate = {}; lossRate = {}"
            .format(winRate, drawRate, lossRate))

        epochLossFile.write("{},{},{},{},{},{},{},{}\n".format(
            epoch, trainingLoss.item(), validationLoss.item(),
            validationAccuracy, winRate - lossRate, winRate, drawRate,
            lossRate))

        # Write validation latent representations
        logging.info("Validation latent representation...")
        validationLatentRepresentationTsr = decoderClassifier.DecodingLatentRepresentation(
            decodingLayerNdx=7, inputTsr=validationStartingPositionsTensor)
        # validationLatentRepresentation1Tsr.shape = torch.Size([ 2 * numberOfPositionsForValidation, decoderClassifier.decodingIntermediateNumberOfNeurons ])
        validationLatentRepresentationArr = validationLatentRepresentationTsr.detach().numpy()
        pcaModel = PCAModel.PCAModel(validationLatentRepresentationArr,
                                     pca_zero_threshold)
        pcaModel.TruncateModel(2)
        validationProjections = pcaModel.Project(
            validationLatentRepresentationArr)
        validationProjectionsTsr = torch.from_numpy(validationProjections)
        validationProjectionsTsr = torch.cat(
            (validationProjectionsTsr,
             torch.argmax(validationOutputTensor,
                          dim=1).unsqueeze(1).double()), 1)
        validationProjectionsTsr = torch.cat(
            (validationProjectionsTsr,
             validationPairWinnerIndexTsr.unsqueeze(1).double()), 1)
        numpy.savetxt(latentRepresentationFile,
                      [validationProjectionsTsr.numpy().flatten()],
                      delimiter=',')

        if epoch % 10 == 0:
            filepath = os.path.join(args.outputDirectory,
                                    'tictactoe_' + str(epoch) + '.bin')
            decoderClassifier.Save(filepath)

            epsilon0GamePositionsList, epsilon0GameWinner = Comparison.SimulateAGame(
                decoderClassifier, authority)
            for position in epsilon0GamePositionsList:
                authority.Display(position)
                print(".............\n")

        # Reinitialize for dead neurons
        ReinitializeDeadNeurons(decoderClassifier, startingPositionsTensor)
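
The win/draw/loss rates in main() are computed with the same three-way normalization twice (once before training and once per epoch). A small helper, sketched here under an illustrative name that is not part of the original script, would factor that out:

def GameOutcomeRates(numberOfWins, numberOfLosses, numberOfDraws):
    # Normalize raw game counts into (winRate, drawRate, lossRate).
    totalGames = numberOfWins + numberOfLosses + numberOfDraws
    return (numberOfWins / totalGames,
            numberOfDraws / totalGames,
            numberOfLosses / totalGames)

# Usage, matching the variables in main():
# winRate, drawRate, lossRate = GameOutcomeRates(
#     numberOfWinsForComparator, numberOfWinsForRandomPlayer, numberOfDraws)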
Example #3
class FaceRecognizer:
    def __init__(self, dimensions=0, k_neighbors=5, use_kernel=False):
        '''
        Constructor for FaceRecognizer.

        Args (optional):
            dimensions (int): How many principal components to keep.
            k_neighbors (int): How many neighbors to compare against in the
                kNN classifier.
            use_kernel (bool): Whether to use the kernel (RBF) variant of PCA.
        '''

        self.pca_model = PCAModel(dimensions=dimensions, use_kernel=use_kernel)
        self.knn_classifier = KNNClassifier(neighbors=k_neighbors)
        self.instances = None

    def train(self, instances):
        '''
        Trains the recognizer with a set of faces.

        Args:
            instances (list<tuple<int, numpy.ndarray>>): List of label/face
                data pairs.
        '''

        self.instances = instances

        # Stack all of the faces together

        faces_list = list()
        for instance in instances:
            faces_list.append(instance[1])

        faces = np.vstack(faces_list).T

        # Learn principal components

        self.pca_model.fit(faces)

        # Add each class to the kNN classifier

        for instance in instances:
            label = instance[0]
            face = instance[1]
            t_face = self.pca_model.transform(face)
            self.knn_classifier.add_sample(label, t_face)

    def fit_knn(self):
        '''
        Fits the kNN classifier with the current instances.
        '''

        if self.instances is None:
            raise RuntimeError('FaceRecognizer has no instances')

        self.knn_classifier.reset()

        for instance in self.instances:
            label = instance[0]
            face = instance[1]
            t_face = self.pca_model.transform(face)
            self.knn_classifier.add_sample(label, t_face)

    def classify(self, face):
        '''
        Classifies a given face from the trained set.

        Args:
            face (numpy.ndarray): The face to classify.
        Returns:
            int, the class the face best belongs to.
        '''

        t_face = self.pca_model.transform(face)
        return self.knn_classifier.classify(t_face)

    def set_dimensions(self, dimensions):
        '''
        Sets the number of dimensions to use from PCA.

        Args:
            dimensions (int): The new number of dimensions.
        '''

        self.pca_model.dimensions = dimensions

        if self.instances is not None:
            self.fit_knn()

    def set_k_neighbors(self, k):
        '''
        Sets k for kNN classifier.

        Args:
            k (int): The new k for the classifier.
        '''

        self.knn_classifier.neighbors = k

    def set_kernel_variance(self, variance):
        '''
        Sets the variance for the RBF kernel and retrains.

        Args:
            variance (float): The new variance.
        '''

        self.pca_model.variance = variance

        if self.instances is not None and self.pca_model.use_kernel:
            self.train(self.instances)
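
A short end-to-end sketch of the class above. The data here is synthetic and purely illustrative; real use would supply labeled face images flattened into 1-D numpy arrays:

import numpy as np

# Synthetic stand-in data: ten flattened 64x64 "faces" with two labels.
instances = [(i % 2, np.random.rand(64 * 64)) for i in range(10)]

recognizer = FaceRecognizer(dimensions=8, k_neighbors=3)
recognizer.train(instances)

# Classify one of the training faces back through the pipeline.
predicted_label = recognizer.classify(instances[0][1])
print(predicted_label)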