Example #1
class Data():
    def __init__(self):
        """
        Sets up all default requirements and placeholders 
        needed for the Caffe Acute Lymphoblastic Leukemia CNN data script.
        """

        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfs()
        self.logFile = self.Helpers.setLogFile(
            self.confs["Settings"]["Logs"]["allCNN"])

        self.CaffeHelpers = CaffeHelpers(self.confs, self.Helpers,
                                         self.logFile)

        self.Helpers.logMessage(self.logFile, "allCNN", "Status",
                                "Data init complete")

    def sortData(self):
        """
        Prepares the data for training.
        """

        self.CaffeHelpers.deleteLMDB()
        self.CaffeHelpers.sortLabels()
        self.CaffeHelpers.sortTrainingData()
        self.CaffeHelpers.recreatePaperData()
        self.CaffeHelpers.createTrainingLMDB()
        self.CaffeHelpers.createValidationLMDB()
        self.CaffeHelpers.computeMean()

        self.Helpers.logMessage(self.logFile, "allCNN", "Status",
                                "Data sorting complete")
Example #2
class Server():
    """ ALL Detection System 2019 Facial Authentication Server Class

    ALL Detection System 2019 Facial Authentication Server. 
    """
    def __init__(self):
        """ Initializes the Server class. """

        # Server setup
        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["Server"]["Logs"] + "/Server")

        # Movidius setup
        self.Movidius = Movidius(self.Helpers.confs["Server"]["Logs"])
        self.Movidius.checkNCS()

        self.ValidDir = self.Helpers.confs["Classifier"]["ValidPath"]
        self.TestingDir = self.Helpers.confs["Classifier"]["TestingPath"]

        self.Helpers.logMessage(self.LogFile, "Facial Recognition Server",
                                "STATUS", "Movidius configured")

        # Facenet setup
        self.Facenet = Facenet(self.Helpers.confs["Server"]["Logs"])

        self.Movidius.allocateGraph(self.Facenet.LoadGraph())

        self.Facenet.PreprocessKnown(self.ValidDir, self.Movidius.Graph)

        self.Helpers.logMessage(self.LogFile, "Facial Recognition Server",
                                "STATUS", "Facenet configured")
Example #3
class Data():
        
    ###############################################################
    #
    # Core Data class wrapper.
    #
    ###############################################################

    def __init__(self):
        
        ###############################################################
        #
        # Sets up all default requirements and placeholders 
        # needed for this class. 
        #
        ###############################################################
        
        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfs()
        self.logFile = self.Helpers.setLogFile(self.confs["Settings"]["Logs"]["DataLogDir"])
        
        self.DataProcess = DataProcess()
        self.labelsToName = {}
        
        self.Helpers.logMessage(self.logFile, "init", "INFO", "Init complete")

    def sortData(self):
        
        ###############################################################
        #
        # Sorts the data 
        #
        ###############################################################

        humanStart, clockStart = self.Helpers.timerStart()
        self.Helpers.logMessage(self.logFile, "sortData", "INFO", "Loading & preparing training data")
        
        dataPaths, classes = self.DataProcess.processFilesAndClasses()

        classId = [int(i) for i in classes]
        classNamesToIds = dict(zip(classes, classId))

        # Divide the training datasets into train and test
        numValidation = int(self.confs["Classifier"]["ValidationSize"] * len(dataPaths))
        self.Helpers.logMessage(self.logFile, "sortData", "Validation Size", str(numValidation))
        self.Helpers.logMessage(self.logFile, "sortData", "Class Size", str(len(classes)))
        random.seed(self.confs["Classifier"]["RandomSeed"])
        random.shuffle(dataPaths)
        trainingFiles = dataPaths[numValidation:]
        validationFiles = dataPaths[:numValidation]

        # Convert the training and validation sets
        self.DataProcess.convertToTFRecord('train', trainingFiles, classNamesToIds)
        self.DataProcess.convertToTFRecord('validation', validationFiles, classNamesToIds)

        # Write the labels to file
        labelsToClassNames = dict(zip(classId, classes))
        self.DataProcess.writeLabels(labelsToClassNames)
        self.Helpers.logMessage(self.logFile, "sortData", "COMPLETE", "Completed sorting data!")
Example #4
class Client():
    def __init__(self):

        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfigs()
        self.LogFile = self.Helpers.setLogFile(self.confs["aiCore"]["Logs"] +
                                               "Client/")

        self.apiUrl = "http://" + self.confs["aiCore"]["IP"] + ":" + str(
            self.confs["aiCore"]["Port"]) + "/infer"
        self.headers = {"content-type": 'application/json'}

        self.Helpers.logMessage(self.LogFile, "CLIENT", "INFO", "Client Ready")
Example #5
class Client():
    """ ALL Detection System 2019 Chatbot Client Class

    Sends requests to the ALL Detection System 2019 Chatbot.
    """
    def __init__(self):
        """ Initializes the Chatbot Client class. """

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["System"]["Logs"] + "Client/")

        self.apiUrl = "http://" + self.Helpers.confs["System"][
            "IP"] + ":" + str(self.Helpers.confs["System"]["Port"]) + "/infer"
        self.headers = {"content-type": 'application/json'}

        self.Helpers.logMessage(self.LogFile, "CLIENT", "INFO", "Client Ready")
Example #6
class Client():
    """ ALL Detection System 2019 Facial Authentication Client Class

    Client for ALL Detection System 2019 Facial Authentication Server. 
    """
    def __init__(self):
        """ Initializes the Server class. """

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["Server"]["Logs"] + "/Client")

        self.addr = "http://" + self.Helpers.confs["Server"]["IP"] + ':' + str(
            self.Helpers.confs["Server"]["Port"]) + '/Inference'
        self.headers = {'content-type': 'image/jpeg'}

    def send(self, imagePath):
        """ Sends image to the inference API endpoint. """

        img = cv2.imread(imagePath)
        _, img_encoded = cv2.imencode('.jpg', img)

        response = requests.post(self.addr,
                                 data=img_encoded.tobytes(),
                                 headers=self.headers)

        response = json.loads(response.text)
        self.Helpers.logMessage(self.LogFile, "Facial Recognition Server",
                                "Classification",
                                imagePath + " | " + response["Message"])

    def test(self):
        """ Loops through all images in the testing directory and sends them to the inference API endpoint. """

        testingDir = self.Helpers.confs["Classifier"]["TestingPath"]

        for test in os.listdir(testingDir):
            if os.path.splitext(
                    test)[1] in self.Helpers.confs["Classifier"]["ValidIType"]:
                self.Helpers.logMessage(self.LogFile,
                                        "Facial Recognition Server",
                                        "Classification",
                                        "Sending " + testingDir + test)
                self.send(testingDir + test)
                time.sleep(5)
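
Stripped of the class plumbing, the send flow above reduces to a few lines of OpenCV and Requests; the endpoint URL and image path are placeholders.

import cv2
import requests

addr = "http://127.0.0.1:8080/Inference"         # placeholder endpoint
img = cv2.imread("test.jpg")                     # placeholder image path
_, img_encoded = cv2.imencode('.jpg', img)       # in-memory JPEG buffer

response = requests.post(addr, data=img_encoded.tobytes(),
                         headers={'content-type': 'image/jpeg'})
print(response.text)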
Example #7
class Movidius():
    """ ALL Detection System 2019 Movidius Class

    Movidius helper functions for the ALL Detection System 2019 Facial Authentication Server project. 
    """
    
    def __init__(self, LogPath):
        """ Initializes the Movidius class. """
         
        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(LogPath+"/Movidius")

        self.ncsDevices = None
        self.ncsDevice = None
        
    def checkNCS(self):
        """ Checks for NCS devices and returns True if devices are found. """

        self.ncsDevices = mvnc.EnumerateDevices()
        
        if len(self.ncsDevices) == 0:
            self.Helpers.logMessage(self.LogFile, "Movdius", "Status", "No NCS devices found, TASS exiting!")
            quit()
            
        self.ncsDevice = mvnc.Device(self.ncsDevices[0])
        self.ncsDevice.OpenDevice()
        
        self.Helpers.logMessage(self.LogFile, "Movdius", "Status", "Connected To NCS")

    def allocateGraph(self, graphfile):
        """ Allocates the NCS graph. """

        self.Graph = self.ncsDevice.AllocateGraph(graphfile)
        
        self.Helpers.logMessage(self.LogFile, "Movdius", "Status", "Graph Allocated")
Example #8
class Movidius():

    ###############################################################
    #
    # Movidius helper functions
    #
    ###############################################################

    def __init__(self, LogPath):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        #
        ###############################################################

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(LogPath + "/Movidius")

        self.ncsDevices = None
        self.ncsDevice = None

    def checkNCS(self):

        ###############################################################
        #
        # Checks for NCS devices, exiting if none are found
        #
        ###############################################################

        #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
        self.ncsDevices = mvnc.EnumerateDevices()

        if len(self.ncsDevices) == 0:
            self.Helpers.logMessage(self.LogFile, "Movdius", "Status",
                                    "No NCS devices found, TASS exiting!")
            quit()

        self.ncsDevice = mvnc.Device(self.ncsDevices[0])
        self.ncsDevice.OpenDevice()

        self.Helpers.logMessage(self.LogFile, "Movdius", "Status",
                                "Connected To NCS")

    def allocateGraph(self, graphfile):

        ###############################################################
        #
        # Allocates the NCS graph
        #
        ###############################################################

        self.Graph = self.ncsDevice.AllocateGraph(graphfile)

        self.Helpers.logMessage(self.LogFile, "Movdius", "Status",
                                "Graph Allocated")
Example #9
class Trainer():

    ###############################################################
    #
    # Sets up all default requirements and placeholders
    # needed for the NLU engine to run.
    #
    # - Helpers: Useful global functions
    # - JumpWay/jumpWayClient: iotJumpWay class and connection
    # - Logging: Logging class
    #
    ###############################################################

    def __init__(self):

        self.Helpers = Helpers()
        self._confs = self.Helpers.loadConfigs()
        self.LogFile = self.Helpers.setLogFile(self._confs["aiCore"]["Logs"] +
                                               "Train/")

        self.intentMap = {}
        self.words = []
        self.classes = []
        self.dataCorpus = []

        self.Model = Model()
        self.Data = Data()

    def setupData(self):

        self.trainingData = self.Data.loadTrainingData()

        self.words, self.classes, self.dataCorpus, self.intentMap = self.Data.prepareData(
            self.trainingData)
        self.x, self.y = self.Data.finaliseData(self.classes, self.dataCorpus,
                                                self.words)

        self.Helpers.logMessage(self.LogFile, "TRAIN", "INFO",
                                "NLU Training Data Ready")

    def setupEntities(self):

        if self._confs["NLU"]["Entities"] == "Mitie":
            self.entityController = Entities()
            self.entityController.trainEntities(
                self._confs["NLU"]["Mitie"]["ModelLocation"],
                self.trainingData)
            self.Helpers.logMessage(self.LogFile, "TRAIN", "OK",
                                    "NLU Trainer Entities Ready")

    def trainModel(self):

        while True:
            self.Helpers.logMessage(self.LogFile, "TRAIN", "ACTION",
                                    "Ready To Begin Training ? (Yes/No)")
            userInput = input(">")

            if userInput == 'Yes': break
            if userInput == 'No': exit()

        self.setupData()
        self.setupEntities()

        humanStart, trainingStart = self.Helpers.timerStart()

        self.Model.trainDNN(self.x, self.y, self.words, self.classes,
                            self.intentMap)

        trainingEnd, trainingTime, humanEnd = self.Helpers.timerEnd(
            trainingStart)

        self.Helpers.logMessage(
            self.LogFile, "TRAIN", "OK", "NLU Model Trained At " + humanEnd +
            " In " + str(trainingEnd) + " Seconds")
Example #10
class Trainer():

    ###############################################################
    #
    # Trainer class
    #
    ###############################################################

    def __init__(self):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        # needed for this class.
        #
        ###############################################################

        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfs()
        self.logFile = self.Helpers.setLogFile(
            self.confs["Settings"]["Logs"]["DataLogDir"])

        self.labelsToName = {}
        # getSplit below references self.items_to_descriptions, which the
        # original snippet never defined; generic descriptions are supplied
        # here so the TF-Slim Dataset can be constructed.
        self.items_to_descriptions = {
            'image': 'A 3-channel RGB image.',
            'label': 'A label from the dataset classes.'
        }

        self.Helpers.logMessage(self.logFile, "init", "INFO", "Init complete")

    def getSplit(self, split_name):

        ###############################################################
        #
        # Obtains the training/validation split
        #
        ###############################################################

        #Check whether the split_name is train or validation
        if split_name not in ['train', 'validation']:
            raise ValueError(
                'The split_name %s is not recognized. Please input either train or validation as the split_name'
                % (split_name))

        #Create the full path for a general FilePattern to locate the tfrecord_files
        FilePattern_path = os.path.join(
            self.confs["Classifier"]["DatasetDir"],
            self.confs["Classifier"]["FilePattern"] % (split_name))

        #Count the total number of examples in all of these shards
        num_samples = 0
        FilePattern_for_counting = '200label_' + split_name
        tfrecords_to_count = [
            os.path.join(self.confs["Classifier"]["DatasetDir"], file)
            for file in os.listdir(self.confs["Classifier"]["DatasetDir"])
            if file.startswith(FilePattern_for_counting)
        ]

        #print(tfrecords_to_count)
        for tfrecord_file in tfrecords_to_count:
            for record in tf.python_io.tf_record_iterator(tfrecord_file):
                num_samples += 1

        #Create a reader, which must be a TFRecord reader in this case
        reader = tf.TFRecordReader

        #Create the keys_to_features dictionary for the decoder
        keys_to_features = {
            'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
            'image/format':
            tf.FixedLenFeature((), tf.string, default_value='jpg'),
            'image/class/label':
            tf.FixedLenFeature([],
                               tf.int64,
                               default_value=tf.zeros([], dtype=tf.int64)),
        }

        #Create the items_to_handlers dictionary for the decoder.
        items_to_handlers = {
            'image': slim.tfexample_decoder.Image(),
            'label': slim.tfexample_decoder.Tensor('image/class/label'),
        }

        #Start to create the decoder
        decoder = slim.tfexample_decoder.TFExampleDecoder(
            keys_to_features, items_to_handlers)

        #Create the labels_to_name file
        labels_to_name_dict = self.labelsToName

        #Actually create the dataset
        dataset = slim.dataset.Dataset(
            data_sources=FilePattern_path,
            decoder=decoder,
            reader=reader,
            num_readers=4,
            num_samples=num_samples,
            num_classes=self.confs["Classifier"]["NumClasses"],
            labels_to_name=labels_to_name_dict,
            items_to_descriptions=self.items_to_descriptions)

        return dataset

    def loadBatch(self, dataset, is_training=True):

        ###############################################################
        #
        # Loads a batch for training
        #
        ###############################################################

        #First create the data_provider object
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset,
            common_queue_capacity=24 +
            3 * self.confs["Classifier"]["BatchSize"],
            common_queue_min=24)

        #Obtain the raw image using the get method
        raw_image, label = data_provider.get(['image', 'label'])

        #Perform the correct preprocessing for this image depending if it is training or evaluating
        image = Classes.inception_preprocessing.preprocess_image(
            raw_image, self.confs["Classifier"]["ImageSize"],
            self.confs["Classifier"]["ImageSize"], is_training)

        #As for the raw images, we just do a simple reshape to batch it up
        raw_image = tf.image.resize_image_with_crop_or_pad(
            raw_image, self.confs["Classifier"]["ImageSize"],
            self.confs["Classifier"]["ImageSize"])

        #Batch up the image by enqueing the tensors internally in a FIFO queue and dequeueing many elements with tf.train.batch.
        images, raw_images, labels = tf.train.batch(
            [image, raw_image, label],
            batch_size=self.confs["Classifier"]["BatchSize"],
            num_threads=4,
            capacity=4 * self.confs["Classifier"]["BatchSize"],
            allow_smaller_final_batch=True)

        return images, raw_images, labels
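
The shard-counting step inside getSplit can be run on its own with the same TF 1.x API used throughout this example; the directory and shard prefix are placeholders.

import os
import tensorflow as tf

datasetDir = "Model/Data"                        # placeholder dataset directory
prefix = "200label_train"                        # shard filename prefix

tfrecords = [os.path.join(datasetDir, f)
             for f in os.listdir(datasetDir)
             if f.startswith(prefix)]

num_samples = sum(1 for path in tfrecords
                  for _ in tf.python_io.tf_record_iterator(path))
print(num_samples)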
Example #11
class Facenet():

    ###############################################################
    #
    # Facenet helper functions
    #
    ###############################################################

    def __init__(self, LogPath, Configuration):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        #
        ###############################################################

        # Class settings
        self.Known = []
        self.Configuration = Configuration

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Configuration.AiCore["Logs"] + "/Facenet")

        # OpenCV settings
        self.OpenCV = OpenCV(self.Helpers)

        # Dlib settings
        self.Detector = dlib.get_frontal_face_detector()
        self.Predictor = dlib.shape_predictor(
            self.Configuration.Classifier["Dlib"])

    def PreprocessKnown(self, ValidDir, Graph):

        ###############################################################
        #
        # Preprocesses known images
        #
        ###############################################################

        for validFile in os.listdir(ValidDir):
            if os.path.splitext(validFile)[1] in self.Configuration.Classifier[
                    "ValidIType"]:
                self.Known.append({
                    "File":
                    validFile,
                    "Score":
                    self.Infer(
                        cv2.resize(cv2.imread(ValidDir + validFile),
                                   (640, 480)), Graph)
                })

        self.Helpers.logMessage(self.LogFile, "Facenet", "STATUS",
                                str(len(self.Known)) + " known images found.")

    def ProcessFrame(self, Frame):

        ###############################################################
        #
        # Processes frame
        #
        ###############################################################

        Known = []

        Frame = cv2.resize(cv2.imdecode(Frame, cv2.IMREAD_UNCHANGED),
                           (640, 480))
        RawFrame = Frame.copy()
        Gray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)

        Path = "Data/Captured/" + datetime.now().strftime(
            "%Y-%m-%d") + "/" + datetime.now().strftime("%H") + "/"
        FileName = datetime.now().strftime('%M-%S') + ".jpg"
        FileNameGray = datetime.now().strftime('%M-%S') + "-Gray.jpg"

        self.OpenCV.SaveFrame(Path + "/", FileName, Frame)
        self.OpenCV.SaveFrame(Path + "/", FileNameGray, Gray)

        return Frame

    def LoadGraph(self):

        ###############################################################
        #
        # Loads Facenet graph
        #
        ###############################################################

        with open(self.Configuration.Classifier["NetworkPath"] +
                  self.Configuration.Classifier["Graph"],
                  mode='rb') as f:
            graphFile = f.read()

        self.Helpers.logMessage(self.LogFile, "Facenet", "Status",
                                "Loaded TASS Graph")

        return graphFile

    def Infer(self, face, graph):

        ###############################################################
        #
        # Runs the image through Facenet
        #
        ###############################################################

        graph.LoadTensor(self.PreProcess(face).astype(np.float16), None)
        output, userobj = graph.GetResult()

        return output

    def PreProcess(self, src):

        ###############################################################
        #
        # Preprocesses an image
        #
        ###############################################################

        NETWORK_WIDTH = 160
        NETWORK_HEIGHT = 160
        preprocessed_image = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))
        preprocessed_image = cv2.cvtColor(preprocessed_image,
                                          cv2.COLOR_BGR2RGB)
        preprocessed_image = self.OpenCV.whiten(preprocessed_image)

        return preprocessed_image

    def Compare(self, face1, face2):

        ###############################################################
        #
        # Determines whether two images are a match
        #
        ###############################################################

        if (len(face1) != len(face2)):
            self.Helpers.logMessage(self.LogFile, "Facenet", "!ERROR!",
                                    "Distance Missmatch")
            return False

        tdiff = 0
        for index in range(0, len(face1)):
            diff = np.square(face1[index] - face2[index])
            tdiff += diff

        if (tdiff < 1.3):
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification",
                                    "Calculated Match: " + str(tdiff))
            return True, tdiff
        else:
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification",
                                    "Calculated Mismatch: " + str(tdiff))
            return False, tdiff

    def addFrameBB(self, frame, x, y, w, h):

        ###############################################################
        #
        # Adds bounding box to the passed frame
        #
        ###############################################################

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        return frame

    def addFrameFeatures(self, frame, shape, x, y):

        ###############################################################
        #
        # Adds facial features to the passed frame
        #
        ###############################################################

        # Loop variables renamed to avoid shadowing the x / y parameters
        for (px, py) in shape:
            cv2.circle(frame, (px, py), 1, (0, 255, 0), -1)

        return frame
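
The OpenCV.whiten call used by PreProcess is project-local; a common FaceNet-style prewhitening, which it presumably implements, is sketched below as an assumption rather than the project's exact code.

import numpy as np

def whiten(image):
    # Zero-mean, roughly unit-variance normalization, with the standard
    # deviation clamped so near-constant images do not blow up.
    mean = np.mean(image)
    std_adj = np.maximum(np.std(image), 1.0 / np.sqrt(image.size))
    return (image - mean) / std_adj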
Example #12
class Data():
    """ ALL Detection System 2019 Data Class

    Data class for the ALL Detection System 2019 Chatbot. 
    """
    def __init__(self):
        """ Initializes the Data class. """

        self.ignore = [',', '.', '!', '?']

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["System"]["Logs"] + "JumpWay/")

        self.LancasterStemmer = LancasterStemmer()

    def loadTrainingData(self):
        """ Loads the NLU and NER training data from Model/Data/training.json """

        with open("Model/Data/training.json") as jsonData:
            trainingData = json.load(jsonData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Training Data Ready")

        return trainingData

    def loadTrainedData(self):
        """ Loads the saved training configuratuon """

        with open("Model/model.json") as jsonData:
            modelData = json.load(jsonData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Model Data Ready")

        return modelData

    def sortList(self, listToSort):
        """ Sorts a list by sorting the list, and removing duplicates 
        
        More Info:
        https://www.programiz.com/python-programming/methods/built-in/sorted 
        https://www.programiz.com/python-programming/list
        https://www.programiz.com/python-programming/set
        """

        return sorted(list(set(listToSort)))

    def extract(self, data=None, splitIt=False):
        """ Extracts words from sentences  
        
        More Info:
        https://www.nltk.org/_modules/nltk/stem/lancaster.html
        http://insightsbot.com/blog/R8fu5/bag-of-words-algorithm-in-python-introduction
        """

        return [
            self.LancasterStemmer.stem(word)
            for word in (data.split() if splitIt else data)
            if word not in self.ignore
        ]

    def makeBagOfWords(self, sInput, words):
        """ Makes a bag of words  
        
        Makes a bag of words used by the inference and training 
        features. If makeBagOfWords is called during training, sInput 
        will be a list.
         
        More Info:
        http://insightsbot.com/blog/R8fu5/bag-of-words-algorithm-in-python-introduction
        """

        if isinstance(sInput, list):
            bagOfWords = []
            for word in words:
                if word in sInput:
                    bagOfWords.append(1)
                else:
                    bagOfWords.append(0)
            return bagOfWords

        else:
            bagOfWords = np.zeros(len(words))
            for cword in self.extract(sInput, True):
                for i, word in enumerate(words):
                    if word == cword: bagOfWords[i] += 1
            return np.array(bagOfWords)

    def prepareClasses(self, intent, classes):
        """ Prepares classes 
        
        Adds an intent key to classes if it does not already exist
        """

        if intent not in classes: classes.append(intent)
        return classes

    def prepareData(self, trainingData, wordsHldr=None,
                    dataCorpusHldr=None, classesHldr=None):
        """ Prepares data 
        
        Prepares the NLU and NER training data, loops through the 
        intents from our dataset, converts any entities / synonyms 
        """

        # Avoid the mutable-default-argument pitfall: fresh holders are
        # created on each call instead of one shared list per default.
        wordsHldr = [] if wordsHldr is None else wordsHldr
        dataCorpusHldr = [] if dataCorpusHldr is None else dataCorpusHldr
        classesHldr = [] if classesHldr is None else classesHldr

        counter = 0
        intentMap = {}

        for intent in trainingData['intents']:

            theIntent = intent['intent']
            for text in intent['text']:

                if 'entities' in intent and len(intent['entities']):
                    i = 0
                    for entity in intent['entities']:
                        tokens = text.replace(
                            trainingData['intents'][counter]["text"][i],
                            "<" + entity["entity"] + ">").lower().split()
                        wordsHldr.extend(tokens)
                        dataCorpusHldr.append((tokens, theIntent))
                        i = i + 1
                else:
                    tokens = text.lower().split()
                    wordsHldr.extend(tokens)
                    dataCorpusHldr.append((tokens, theIntent))

            intentMap[theIntent] = counter
            classesHldr = self.prepareClasses(theIntent, classesHldr)
            counter = counter + 1

        return self.sortList(self.extract(
            wordsHldr,
            False)), self.sortList(classesHldr), dataCorpusHldr, intentMap

    def finaliseData(self, classes, dataCorpus, words):
        """ Finalises the NLU training data  """

        trainData = []
        out = np.zeros(len(classes))

        for document in dataCorpus:
            output = list(out)
            output[classes.index(document[1])] = 1
            trainData.append([
                self.makeBagOfWords(self.extract(document[0], False), words),
                output
            ])

        random.shuffle(trainData)
        trainData = np.array(trainData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Finalised Training Data Ready")

        return list(trainData[:, 0]), list(trainData[:, 1])
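
A standalone run of the bag-of-words step above (the non-list branch of makeBagOfWords, with stemming already applied); the vocabulary and tokens are made up for the example.

import numpy as np

words = ["are", "hello", "how", "you"]   # sorted vocabulary
tokens = ["hello", "you"]                # pre-stemmed input tokens

bagOfWords = np.zeros(len(words))
for cword in tokens:
    for i, word in enumerate(words):
        if word == cword:
            bagOfWords[i] += 1

print(bagOfWords)                        # [0. 1. 0. 1.]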
Example #13
class Facenet():
    """ ALL Detection System 2019 Facenet Class

    Facenet helper functions for the ALL Detection System 2019 Facial Authentication Server project. 
    """
    
    def __init__(self, LogPath):
        """ Initializes the Facenet class. """

        # Class settings
        self.Known = []
        
        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(self.Helpers.confs["Server"]["Logs"]+"/Facenet")
        
        # OpenCV settings 
        self.OpenCV = OpenCV(self.Helpers)
        
        # Dlib settings 
        self.Detector   = dlib.get_frontal_face_detector()
        self.Predictor  = dlib.shape_predictor(self.Helpers.confs["Classifier"]["Dlib"])

    def PreprocessKnown(self, ValidDir, Graph):
        """ Preprocesses known images. """
        
        for validFile in os.listdir(ValidDir):
            if os.path.splitext(validFile)[1] in self.Helpers.confs["Classifier"]["ValidIType"]:
                self.Known.append({"File": validFile, "Score": self.Infer(cv2.resize(cv2.imread(ValidDir+validFile), (640, 480)), Graph)})
        
        self.Helpers.logMessage(self.LogFile,
                                "Facenet",
                                "STATUS",
                                str(len(self.Known)) + " known images found.") 

    def ProcessFrame(self, Frame):
        """ Preprocesses frame. """

        Known = []

        Frame = cv2.resize(cv2.imdecode(Frame, cv2.IMREAD_UNCHANGED), (640, 480)) 
        RawFrame = Frame.copy()
        Gray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)

        Path = "Data/Captured/" + datetime.now().strftime("%Y-%m-%d") + "/" + datetime.now().strftime("%H") + "/"
        FileName = datetime.now().strftime('%M-%S') + ".jpg"
        FileNameGray = datetime.now().strftime('%M-%S') + "-Gray.jpg"

        self.OpenCV.SaveFrame(Path + "/", FileName, Frame)
        self.OpenCV.SaveFrame(Path + "/", FileNameGray, Gray)

        return Frame

    def LoadGraph(self):
        """ Loads Facenet graph. """

        with open(self.Helpers.confs["Classifier"]["Graph"], mode='rb') as f:
            graphFile = f.read()
            
        self.Helpers.logMessage(self.LogFile, "Facenet", "Status", "Loaded TASS Graph")

        return graphFile
        
    def Infer(self, face, graph):
        """ Runs the image through Facenet. """
        
        graph.LoadTensor(self.PreProcess(face).astype(np.float16), None)
        output, userobj = graph.GetResult()

        return output

    def PreProcess(self, src):
        """ Preprocesses an image. """
        
        NETWORK_WIDTH = 160
        NETWORK_HEIGHT = 160
        
        preprocessed_image = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))
        preprocessed_image = cv2.cvtColor(preprocessed_image, cv2.COLOR_BGR2RGB)
        preprocessed_image = self.OpenCV.whiten(preprocessed_image)
        
        return preprocessed_image

    def Compare(self, face1, face2):
        """ Determines whether two images are a match. """

        if (len(face1) != len(face2)):
            self.Helpers.logMessage(self.LogFile, "Facenet", "!ERROR!", "Distance Missmatch")
            return False

        tdiff = 0
        for index in range(0, len(face1)):
            diff = np.square(face1[index] - face2[index])
            tdiff += diff

        if (tdiff < 1.3):
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification", "Calculated Match: " + str(tdiff))
            return True, tdiff
        else:
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification", "Calculated Mismatch: " + str(tdiff))
            return False, tdiff
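
Compare above computes a squared Euclidean distance between two embeddings and applies a fixed 1.3 threshold; with NumPy the same check vectorizes to a couple of lines (the embeddings here are random stand-ins).

import numpy as np

face1 = np.random.rand(128)               # stand-in embedding
face2 = np.random.rand(128)               # stand-in embedding

tdiff = np.sum(np.square(face1 - face2))  # squared L2 distance
match = tdiff < 1.3                       # same threshold as Compare
print(match, tdiff)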
Example #14
class Data():
    def __init__(self):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        # needed for the NLU engine to run.
        #
        # - Helpers: Useful global functions
        # - Logging: Logging class
        # - LancasterStemmer: Word stemmer
        #
        ###############################################################

        self.ignore = [',', '.', '!', '?']

        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfigs()
        self.LogFile = self.Helpers.setLogFile(self.confs["aiCore"]["Logs"] +
                                               "JumpWay/")

        self.LancasterStemmer = LancasterStemmer()

    def loadTrainingData(self):

        ###############################################################
        #
        # Loads the NLU and NER training data from Model/Data/training.json
        #
        ###############################################################

        with open("Model/Data/training.json") as jsonData:
            trainingData = json.load(jsonData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Training Data Ready")

        return trainingData

    def loadTrainedData(self):

        ###############################################################
        #
        # Loads the saved training configuration
        #
        ###############################################################

        with open("Model/model.json") as jsonData:
            modelData = json.load(jsonData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Model Data Ready")

        return modelData

    def sortList(self, listToSort):

        ###############################################################
        #
        # Sorts a list and removes duplicates
        #
        # https://www.programiz.com/python-programming/methods/built-in/sorted
        # https://www.programiz.com/python-programming/list
        # https://www.programiz.com/python-programming/set
        #
        ###############################################################

        return sorted(list(set(listToSort)))

    def extract(self, data=None, splitIt=False):

        ###############################################################
        #
        # Extracts words from sentences, stripping out characters in
        # the ignore list above
        #
        # https://www.nltk.org/_modules/nltk/stem/lancaster.html
        # http://insightsbot.com/blog/R8fu5/bag-of-words-algorithm-in-python-introduction
        #
        ###############################################################

        return [
            self.LancasterStemmer.stem(word)
            for word in (data.split() if splitIt else data)
            if word not in self.ignore
        ]

    def makeBagOfWords(self, sInput, words):

        ###############################################################
        #
        # Makes a bag of words used by the inference and training
        # features. If makeBagOfWords is called during training, sInput
        # will be a list.
        #
        # http://insightsbot.com/blog/R8fu5/bag-of-words-algorithm-in-python-introduction
        #
        ###############################################################

        if isinstance(sInput, list):
            bagOfWords = []
            for word in words:
                if word in sInput:
                    bagOfWords.append(1)
                else:
                    bagOfWords.append(0)
            return bagOfWords

        else:
            bagOfWords = np.zeros(len(words))
            for cword in self.extract(sInput, True):
                for i, word in enumerate(words):
                    if word == cword: bagOfWords[i] += 1
            return np.array(bagOfWords)

    def prepareClasses(self, intent, classes):

        ###############################################################
        #
        # Adds an intent key to classes if it does not already exist
        #
        ###############################################################

        if intent not in classes: classes.append(intent)
        return classes

    def prepareData(self, trainingData, wordsHldr=None,
                    dataCorpusHldr=None, classesHldr=None):

        ###############################################################
        #
        # Prepares the NLU and NER training data, loops through the
        # intents from our dataset, converts any entities / synonyms
        #
        ###############################################################

        # Avoid the mutable-default-argument pitfall: fresh holders are
        # created on each call instead of one shared list per default.
        wordsHldr = [] if wordsHldr is None else wordsHldr
        dataCorpusHldr = [] if dataCorpusHldr is None else dataCorpusHldr
        classesHldr = [] if classesHldr is None else classesHldr

        counter = 0
        intentMap = {}

        for intent in trainingData['intents']:

            theIntent = intent['intent']
            for text in intent['text']:

                if 'entities' in intent and len(intent['entities']):
                    i = 0
                    for entity in intent['entities']:
                        tokens = text.replace(
                            trainingData['intents'][counter]["text"][i],
                            "<" + entity["entity"] + ">").lower().split()
                        wordsHldr.extend(tokens)
                        dataCorpusHldr.append((tokens, theIntent))
                        i = i + 1
                else:
                    tokens = text.lower().split()
                    wordsHldr.extend(tokens)
                    dataCorpusHldr.append((tokens, theIntent))

            intentMap[theIntent] = counter
            classesHldr = self.prepareClasses(theIntent, classesHldr)
            counter = counter + 1

        return self.sortList(self.extract(
            wordsHldr,
            False)), self.sortList(classesHldr), dataCorpusHldr, intentMap

    def finaliseData(self, classes, dataCorpus, words):

        ###############################################################
        #
        # Finalises the NLU training data
        #
        ###############################################################

        trainData = []
        out = np.zeros(len(classes))

        for document in dataCorpus:
            output = list(out)
            output[classes.index(document[1])] = 1
            trainData.append([
                self.makeBagOfWords(self.extract(document[0], False), words),
                output
            ])

        random.shuffle(trainData)
        trainData = np.array(trainData)

        self.Helpers.logMessage(self.LogFile, "Data", "INFO",
                                "Finalised Training Data Ready")

        return list(trainData[:, 0]), list(trainData[:, 1])
Example #15
class Classifier():
    def __init__(self):

        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfs()
        self.logFile = self.Helpers.setLogFile(
            self.confs["Settings"]["Logs"]["DataLogDir"])
        self.Helpers.logMessage(self.logFile, "init", "INFO", "Init complete")

        self.movidius = None

        self.mean = 128
        self.std = 1.0 / 128

        self.categories = []
        self.graphfile = None
        self.graph = None
        self.reqsize = None

        self.extensions = [".jpg", ".png"]

        self.CheckDevices()

    def CheckDevices(self):

        #mvnc.SetGlobalOption(mvnc.GlobalOption.LOGLEVEL, 2)
        devices = mvnc.EnumerateDevices()
        if len(devices) == 0:
            self.Helpers.logMessage(self.logFile, "CheckDevices", "WARNING",
                                    "No Movidius Devices Found")
            quit()
        self.movidius = mvnc.Device(devices[0])
        self.movidius.OpenDevice()
        self.Helpers.logMessage(self.logFile, "CheckDevices", "STATUS",
                                "Movidius Connected")

    def AllocateGraph(self, graphfile):

        self.graph = self.movidius.AllocateGraph(graphfile)

    def LoadInception(self):

        self.reqsize = self.confs["Classifier"]["ImageSize"]
        with open(self.confs["Classifier"]["NetworkPath"] +
                  self.confs["Classifier"]["InceptionGraph"],
                  mode='rb') as f:
            self.graphfile = f.read()
        self.AllocateGraph(self.graphfile)
        self.Helpers.logMessage(self.logFile, "LoadInception", "STATUS",
                                "Graph Allocated")

        with open(
                self.confs["Classifier"]["NetworkPath"] + 'Model/classes.txt',
                'r') as f:
            for line in f:
                cat = line.split('\n')[0]
                if cat != 'classes':
                    self.categories.append(cat)
        self.Helpers.logMessage(self.logFile, "LoadInception", "STATUS",
                                "Categories Loaded")
Example #16
class Trainer():
    """ ALL Detection System 2019 Chatbot Training Class

    Trains the ALL Detection System 2019 Chatbot. 
    """
    def __init__(self):
        """ Initializes the Training class. """

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["System"]["Logs"] + "Train/")

        self.intentMap = {}
        self.words = []
        self.classes = []
        self.dataCorpus = []

        self.Model = Model()
        self.Data = Data()

    def setupData(self):
        """ Prepares the data. """

        self.trainingData = self.Data.loadTrainingData()

        self.words, self.classes, self.dataCorpus, self.intentMap = self.Data.prepareData(
            self.trainingData)
        self.x, self.y = self.Data.finaliseData(self.classes, self.dataCorpus,
                                                self.words)

        self.Helpers.logMessage(self.LogFile, "TRAIN", "INFO",
                                "NLU Training Data Ready")

    def setupEntities(self):
        """ Prepares the entities. """

        if self.Helpers.confs["NLU"]["Entities"] == "Mitie":
            self.entityController = Entities()
            self.entityController.trainEntities(
                self.Helpers.confs["NLU"]["Mitie"]["ModelLocation"],
                self.trainingData)
            self.Helpers.logMessage(self.LogFile, "TRAIN", "OK",
                                    "NLU Trainer Entities Ready")

    def trainModel(self):
        """ Trains the model. """

        while True:
            self.Helpers.logMessage(self.LogFile, "TRAIN", "ACTION",
                                    "Ready To Begin Training ? (Yes/No)")
            userInput = input(">")

            if userInput == 'Yes': break
            if userInput == 'No': exit()

        self.setupData()
        self.setupEntities()

        humanStart, trainingStart = self.Helpers.timerStart()

        self.Model.trainDNN(self.x, self.y, self.words, self.classes,
                            self.intentMap)

        trainingEnd, trainingTime, humanEnd = self.Helpers.timerEnd(
            trainingStart)

        self.Helpers.logMessage(
            self.LogFile, "TRAIN", "OK", "NLU Model Trained At " + humanEnd +
            " In " + str(trainingEnd) + " Seconds")
Example #17
class Chatbot():
    """ ALL Detection System 2019 Chatbot Class

    The ALL Detection System 2019 Chatbot. 
    """
    def __init__(self):
        """ Initializes the Chatbot class. """

        self.isTraining = False
        self.ner = None

        self.Helpers = Helpers()

        self.user = {}

        self.LogFile = self.Helpers.setLogFile(
            self.Helpers.confs["System"]["Logs"] + "NLU/")
        self.ChatLogFile = self.Helpers.setLogFile(
            self.Helpers.confs["System"]["Logs"] + "Chat/")

    def initiateSession(self):
        """ Initializes a Chatbot sesiion. 
        
        Initiates empty guest user session, GeniSys will ask the user 
        verify their GeniSys user by speaking or typing if it does
        not know who it is speaking to. 
        """

        self.userID = 0
        if not self.userID in self.user:
            self.user[self.userID] = {}
            self.user[self.userID]["history"] = {}

    def initNLU(self):
        """ Initializes a Chatbot sesiion. 
        
        Initiates the NLU setting up the data, NLU / entities models 
        and required modules such as context and extensions.
        """

        self.Data = Data()
        self.trainingData = self.Data.loadTrainingData()
        self.trainedData = self.Data.loadTrainedData()

        self.Model = Model()
        self.Context = Context()
        self.Extensions = Extensions()

        self.restoreData()
        self.restoreNER()
        self.restoreNLU()

        self.initiateSession()
        self.setThresholds()

    def commandsCallback(self, topic, payload):
        """ iotJumpWay callback function. 
        
        The callback function that is triggered in the event of a 
        command communication from the iotJumpWay.
        """

        self.Helpers.logMessage(
            self.LogFile, "iotJumpWay", "INFO",
            "Recieved iotJumpWay Command Data : " + str(payload))

        commandData = json.loads(payload.decode("utf-8"))

    def restoreData(self):
        """ Restores the training data. 
        
        Sets the local trained data using data retrieved above
        """

        self.trainedWords = self.trainedData["words"]
        self.trainedClasses = self.trainedData["classes"]
        self.x = self.trainedData["x"]
        self.y = self.trainedData["y"]
        self.intentMap = self.trainedData["intentMap"][0]

    def loadEntityController(self):
        """ Initiates the entity extractor class """

        self.entityController = Entities()

    def restoreNER(self):
        """ Loads entity controller and restores the NER model """

        self.loadEntityController()
        self.ner = self.entityController.restoreNER()

    def restoreNLU(self):
        """ Restores the NLU model """

        self.tmodel = self.Model.buildDNN(self.x, self.y)

    def setThresholds(self):
        """ Sets thresholds
        
        Sets the threshold for the NLU engine; this can be changed 
        using arguments to command-line programs or parameters for 
        API calls.
        """

        self.threshold = self.Helpers.confs["NLU"]["Threshold"]
        self.entityThrshld = self.Helpers.confs["NLU"]["Mitie"]["Threshold"]

    def communicate(self, sentence):
        """ Responds to the user
        
        First checks to ensure that the program is not training, 
        then parses any entities that may be in the intent, then 
        checks context and extensions before providing a response.
        """

        if not self.isTraining:

            parsed, fallback, entityHolder, parsedSentence = self.entityController.parseEntities(
                sentence, self.ner, self.trainingData)

            classification = self.Model.predict(self.tmodel, parsedSentence,
                                                self.trainedWords,
                                                self.trainedClasses)

            if len(classification) > 0:

                clearEntities = False
                theIntent = self.trainingData["intents"][self.intentMap[
                    classification[0][0]]]

                if len(entityHolder) and not len(theIntent["entities"]):
                    clearEntities = True

                if (self.Context.checkSessionContext(self.user[self.userID],
                                                     theIntent)):

                    if self.Context.checkClearContext(theIntent, 0):
                        self.user[self.userID]["context"] = ""

                    contextIn, contextOut, contextCurrent = self.Context.setContexts(
                        theIntent, self.user[self.userID])

                    if not len(entityHolder) and len(theIntent["entities"]):
                        response, entities = self.entityController.replaceResponseEntities(
                            random.choice(theIntent["fallbacks"]),
                            entityHolder)
                        extension, extensionResponses, exEntities = self.Extensions.setExtension(
                            theIntent)
                    elif clearEntities:
                        entityHolder = []
                        response = random.choice(theIntent["responses"])
                        extension, extensionResponses, exEntities = self.Extensions.setExtension(
                            theIntent)
                    else:
                        response, entities = self.entityController.replaceResponseEntities(
                            random.choice(theIntent["responses"]),
                            entityHolder)
                        extension, extensionResponses, exEntities = self.Extensions.setExtension(
                            theIntent)

                    if extension is not None:
                        classParts = extension.split(".")
                        classFolder = classParts[0]
                        className = classParts[1]
                        theEntities = None

                        if exEntities != False:
                            theEntities = entities

                        module = __import__(
                            classParts[0] + "." + classParts[1], globals(),
                            locals(), [className])
                        extensionClass = getattr(module, className)()
                        response = getattr(extensionClass,
                                           classParts[2])(extensionResponses,
                                                          theEntities)

                    return {
                        "Response":
                        "OK",
                        "ResponseData": [{
                            "Received":
                            sentence,
                            "Intent":
                            classification[0][0],
                            "Confidence":
                            str(classification[0][1]),
                            "Response":
                            response,
                            "Context": [{
                                "In": contextIn,
                                "Out": contextOut,
                                "Current": contextCurrent
                            }],
                            "Extension":
                            extension,
                            "Entities":
                            entityHolder
                        }]
                    }

                else:

                    self.user[self.userID]["context"] = ""
                    contextIn, contextOut, contextCurrent = self.Context.setContexts(
                        theIntent, self.user[self.userID])

                    if fallback and fallback in theIntent and len(
                            theIntent["fallbacks"]):
                        response, entities = self.entityController.replaceResponseEntities(
                            random.choice(theIntent["fallbacks"]),
                            entityHolder)
                        extension, extensionResponses = None, []
                    else:
                        response, entities = self.entityController.replaceResponseEntities(
                            random.choice(theIntent["responses"]),
                            entityHolder)
                        extension, extensionResponses, exEntities = self.Extensions.setExtension(
                            theIntent)

                    if extension is not None:
                        classParts = extension.split(".")
                        classFolder = classParts[0]
                        className = classParts[1]
                        theEntities = None

                        if exEntities != False:
                            theEntities = entities

                        module = __import__(
                            classParts[0] + "." + classParts[1], globals(),
                            locals(), [className])
                        extensionClass = getattr(module, className)()
                        response = getattr(extensionClass,
                                           classParts[2])(extensionResponses,
                                                          theEntities)

                    else:
                        response = self.entityController.replaceResponseEntities(
                            random.choice(theIntent["responses"]),
                            entityHolder)
                        if isinstance(response, tuple):
                            response = response[0]

                    return {
                        "Response":
                        "OK",
                        "ResponseData": [{
                            "Received":
                            sentence,
                            "Intent":
                            classification[0][0],
                            "Confidence":
                            str(classification[0][1]),
                            "Response":
                            response,
                            "Context": [{
                                "In": contextIn,
                                "Out": contextOut,
                                "Current": contextCurrent
                            }],
                            "Extension":
                            extension,
                            "Entities":
                            entityHolder
                        }]
                    }

            else:

                contextCurrent = self.Context.getCurrentContext(
                    self.user[self.userID])

                return {
                    "Response":
                    "FAILED",
                    "ResponseData": [{
                        "Received":
                        sentence,
                        "Intent":
                        "UNKNOWN",
                        "Confidence":
                        "NA",
                        "Responses": [],
                        "Response":
                        random.choice(
                            self.Helpers.confs["NLU"]["defaultResponses"]),
                        "Context": [{
                            "In": "NA",
                            "Out": "NA",
                            "Current": contextCurrent
                        }],
                        "Extension":
                        "NA",
                        "Entities":
                        entityHolder
                    }]
                }
        else:

            return {
                "Response":
                "FAILED",
                "ResponseData": [{
                    "Status": "Training",
                    "Message": "NLU Engine is currently training"
                }]
            }
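
The extension hook inside communicate boils down to a dynamic import plus getattr; an isolated sketch with importlib is below. The module, class, and method names are hypothetical, so it only runs with such a module on the path.

import importlib

extension = "Extensions.TimeExtension.getTime"   # hypothetical "pkg.Class.method"
classFolder, className, methodName = extension.split(".")

module = importlib.import_module(classFolder + "." + className)
extensionClass = getattr(module, className)()
# Responses list and entities, as passed in communicate above
response = getattr(extensionClass, methodName)(["It is %%TIME%%"], None)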
Example #18
class Data():
        
    ###############################################################
    #
    # Core Data class.
    #
    ###############################################################

    def __init__(self):
        
        ###############################################################
        #
        # Sets up all default requirements and placeholders 
        # needed for this class. 
        #
        ###############################################################
        
        self.Helpers = Helpers()
        self.confs = self.Helpers.loadConfs()
        self.logFile = self.Helpers.setLogFile(self.confs["Settings"]["Logs"]["DataLogDir"])

    def getLabelsAndDirectories(self):
        
        ###############################################################
        #
        # Returns a list of classes/labels and directories. 
        #
        ###############################################################

        datasetDir = self.confs["Classifier"]["DatasetDir"]

        labels = [name for name in os.listdir(datasetDir)
                  if os.path.isdir(os.path.join(datasetDir, name))
                  and name != '.ipynb_checkpoints']

        directories = []
        for dirName in os.listdir(datasetDir):
            if dirName != '.ipynb_checkpoints':
                path = os.path.join(datasetDir, dirName)
                if os.path.isdir(path):
                    directories.append(path)
        return labels, directories

    def processFilesAndClasses(self):
        
        ###############################################################
        #
        # Returns a list of filenames and classes/labels. 
        #
        ###############################################################

        labels, directories = self.getLabelsAndDirectories()
        
        data = []
        for directory in directories:
            for filename in os.listdir(directory):
                if os.path.splitext(filename)[1] in self.confs["Classifier"]["ValidIType"]:
                    data.append(os.path.join(directory, filename))
        return data, sorted(labels)

    def convertToTFRecord(self, split_name, filenames, labels_to_ids):
        
        ###############################################################
        #
        # Converts the given filenames to a TFRecord dataset. 
        #
        ###############################################################
        
        assert split_name in ['train', 'validation']

        num_per_shard = int(math.ceil(len(filenames) / float(self.confs["Classifier"]["Shards"])))
        self.Helpers.logMessage(self.logFile, "convertToTFRecord", "INFO", "Number of files: " + str(len(filenames)))
        self.Helpers.logMessage(self.logFile, "convertToTFRecord", "INFO", "Number per shard: " + str(num_per_shard))

        with tf.Graph().as_default():
            image_reader = ImageReader()
            with tf.Session('') as sess:
                for shard_id in range(self.confs["Classifier"]["Shards"]):
                    output_filename = self.getDatasetFilename(split_name, shard_id)
                    self.Helpers.logMessage(self.logFile, "convertToTFRecord", "STATUS", "Saving: " + str(output_filename))
                    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
                        start_ndx = shard_id * num_per_shard
                        end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
                        for i in range(start_ndx, end_ndx):
                            # Use \r so the progress counter overwrites
                            # itself instead of printing a line per image
                            sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
                                i + 1, len(filenames), shard_id))
                            sys.stdout.flush()
                            # Read the filename:
                            image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
                            height, width = image_reader.read_image_dims(sess, image_data)
                            class_name = os.path.basename(os.path.dirname(filenames[i]))
                            class_id = labels_to_ids[class_name]
                            self.Helpers.logMessage(self.logFile, "convertToTFRecord", "INFO", "class_name: " + str(class_name))
                            self.Helpers.logMessage(self.logFile, "convertToTFRecord", "INFO", "class_id: " + str(class_id))
                            example = self.imageToTFExample(
                                image_data, b'jpg', height, width, class_id)
                            tfrecord_writer.write(example.SerializeToString())
        sys.stdout.write('\n')
        sys.stdout.flush()

    def getDatasetFilename(self, split_name, shard_id):
        
        ###############################################################
        #
        # Gets the dataset filename
        #
        ###############################################################
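        # Example with illustrative values: TFRecordFile "image", split
        # "train", shard 0 of 2 shards gives
        # <DatasetDir>/image_train_00000-of-00002.tfrecord.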

        output_filename = '%s_%s_%05d-of-%05d.tfrecord' % (
            self.confs["Classifier"]["TFRecordFile"], split_name, shard_id, self.confs["Classifier"]["Shards"])
        return os.path.join(self.confs["Classifier"]["DatasetDir"], output_filename)

    def int64Feature(self, values):
        
        ###############################################################
        #
        # Returns a TF-Feature of int64s
        #
        ###############################################################
        
        if not isinstance(values, (tuple, list)):
            values = [values]
        return tf.train.Feature(int64_list=tf.train.Int64List(value=values))

    def bytesFeature(self, values):
        
        ###############################################################
        #
        # Returns a TF-Feature of bytes
        #
        ###############################################################
        
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))

    def imageToTFExample(self, image_data, image_format, height, width, class_id):
        
        ###############################################################
        #
        # Converts an image to a TF Example
        #
        ###############################################################

        return tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': self.bytesFeature(image_data),
            'image/format': self.bytesFeature(image_format),
            'image/class/label': self.int64Feature(class_id),
            'image/height': self.int64Feature(height),
            'image/width': self.int64Feature(width)
        }))
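    # A reader consuming these records would mirror the feature map above;
    # a sketch, assuming the same TF 1.x API:
    #
    #   keys_to_features = {
    #       'image/encoded': tf.FixedLenFeature((), tf.string),
    #       'image/format': tf.FixedLenFeature((), tf.string, default_value='jpg'),
    #       'image/class/label': tf.FixedLenFeature([], tf.int64),
    #       'image/height': tf.FixedLenFeature([], tf.int64),
    #       'image/width': tf.FixedLenFeature([], tf.int64)
    #   }
    #   parsed = tf.parse_single_example(serialized_example, keys_to_features)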

    def writeLabels(self, labels_to_names):
        
        ###############################################################
        #
        # Writes the class files: a plain list of the label IDs and 
        # a file mapping each label ID to its class name.
        #
        ###############################################################

        labels_filename = os.path.join(self.confs["Classifier"]["DatasetDir"], self.confs["Classifier"]["Labels"])

        with tf.gfile.Open(self.confs["Classifier"]["Classes"], 'w') as f:
            for label in labels_to_names:
                f.write('%s\n' % (label))
                
        with tf.gfile.Open(labels_filename, 'w') as f:
            for label in labels_to_names:
                class_name = labels_to_names[label]
                f.write('%d:%s\n' % (label, class_name))
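Taken together, the class above follows the usual TF 1.x dataset-preparation flow: list the class folders, build a class-name-to-id mapping, split the file list, then shard each split into TFRecords. A minimal driver along these lines (the 70/30 split, the seed, and the id-to-name dict construction are assumptions; the original split logic is not shown here):

import random

data = Data()
filenames, classNames = data.processFilesAndClasses()
labels_to_ids = dict(zip(classNames, range(len(classNames))))

# Hypothetical split: hold out 30% of the shuffled files for validation
random.seed(0)
random.shuffle(filenames)
split = int(len(filenames) * 0.7)

data.convertToTFRecord('train', filenames[:split], labels_to_ids)
data.convertToTFRecord('validation', filenames[split:], labels_to_ids)
data.writeLabels(dict(zip(range(len(classNames)), classNames)))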
class Client():

    ###################################################################
    #
    # Sends single or multiple images to Facial Identification Server.
    #
    ###################################################################

    def __init__(self):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        # needed for the program to run.
        #
        ###############################################################

        # Client setup
        self.Helpers = Helpers()
        self.Configuration = Configuration()
        self.LogFile = self.Helpers.setLogFile(
            self.Configuration.AiCore["Logs"] + "/Client")

        # Request setup
        self.addr = "http://" + self.Configuration.Cameras[0]["API"][
            "IP"] + ':' + str(
                self.Configuration.Cameras[0]["API"]["Port"]) + '/Inference'
        self.headers = {'content-type': 'image/jpeg'}

    def send(self, imagePath):

        ###############################################################
        #
        # Sends image to the inference API endpoint.
        #
        ###############################################################

        img = cv2.imread(imagePath)
        # Encode as JPEG to match the declared image/jpeg content type
        _, img_encoded = cv2.imencode('.jpg', img)
        response = requests.post(self.addr,
                                 data=img_encoded.tobytes(),
                                 headers=self.headers)
        response = json.loads(response.text)
        self.Helpers.logMessage(self.LogFile, "Facial Recognition Server",
                                "Classification",
                                imagePath + " | " + response["Message"])

    def test(self):

        ###############################################################
        #
        # Loops through all images in the testing directory and sends
        # them to the inference API endpoint.
        #
        ###############################################################

        testingDir = self.Configuration.Classifier[
            "NetworkPath"] + self.Configuration.Classifier["TestingPath"]

        for test in os.listdir(testingDir):
            if os.path.splitext(
                    test)[1] in self.Configuration.Classifier["ValidIType"]:
                testPath = os.path.join(testingDir, test)
                self.Helpers.logMessage(self.LogFile,
                                        "Facial Recognition Server",
                                        "Classification",
                                        "Sending " + testPath)
                self.send(testPath)
                time.sleep(5)
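A minimal entry point for the client (the argument handling here is an assumption; the original invocation is not shown in the example above):

import sys

if __name__ == "__main__":
    client = Client()
    if len(sys.argv) > 1:
        # Send a single image passed on the command line
        client.send(sys.argv[1])
    else:
        # Otherwise sweep the whole testing directory
        client.test()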