Example #1
    def _initConfig(self):

        self.name = 'mnist_original'
        util.create_dir(DATASET_DIR)
        self.h = INPUT_SHAPE[0]
        self.w = INPUT_SHAPE[1]
        self.c = INPUT_SHAPE[2]
        self.num_classes = 10
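The snippet above references module-level constants (DATASET_DIR, INPUT_SHAPE) that the examples do not show. A minimal sketch of plausible definitions for the MNIST case; the names come from the code, the values are illustrative only:

    import os

    # Hypothetical definitions for the constants assumed by the example above.
    DATASET_DIR = os.path.join('data', 'mnist_original')
    INPUT_SHAPE = (28, 28, 1)  # MNIST images: height, width, channels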
Example #2
    def _initConfig(self):

        self.name = 'cifar'
        util.create_dir(DATASET_DIR)
        self.frames = INPUT_SHAPE[0]
        self.h = INPUT_SHAPE[1]
        self.w = INPUT_SHAPE[2]
        self.c = INPUT_SHAPE[3]
        self.num_classes = 100
Example #3
    def _initConfig(self):

        self.name = 'lrw'
        util.create_dir(DATASET_DIR)
        self.frames = INPUT_SHAPE[0]
        self.h = INPUT_SHAPE[1]
        self.w = INPUT_SHAPE[2]
        self.c = INPUT_SHAPE[3]
        self.num_classes = 500
Example #4
    def start_mnist_original_writer(self):

        util.create_dir(MNIST_ORIGINAL_DATASET_DIR)
        util.create_dir(MNIST_ORIGINAL_TFRECORDS_DIR)

        dataset_types = config.config['dataset_types']
        classmapfile = os.path.join(MNIST_ORIGINAL_TFRECORDS_DIR, 'mnist_classmap.txt')

        if not util.isDirOrFileExist(classmapfile):
            mnistClasses = util.mnistClassmap(numAsKeys=False)
            util.writeClassmapFile(classmapfile, mnistClasses)

        mnistReader = MnistReader()

        for datasetType in dataset_types:
            self.mnist_write_records(mnistReader, MNIST_ORIGINAL_TFRECORDS_DIR, 'mnist_original', datasetType)
Example #5
    def mnist_write_records(self, mnistReader, tfrecordsdir, datasetname, datasetType):

        print("Creating dir for saving tf records!")
        dirpath = util.create_dir(os.path.join(tfrecordsdir, datasetType))

        print("Starting processing {} images".format(datasetType))
        batchSize = WRITE_BATCH_SIZE
        # Use ceiling division so an exact multiple of batchSize does not
        # produce an empty trailing batch.
        numExamples = mnistReader.getNumOfExamples(datasetType)
        totalBatches = (numExamples + batchSize - 1) // batchSize

        for i in range(totalBatches):

            print("Writing batch {}/{}".format(i + 1, totalBatches))

            currentBatch = mnistReader.getData(datasetType)

            images = currentBatch[0][i*batchSize:(i+1)*batchSize].astype(dtype=np.float32)
            labels = currentBatch[1][i*batchSize:(i+1)*batchSize]

            if datasetname == 'mnist':
                images = self.resize_images_mnist(images)

            # Convert one-hot labels to class indices.
            labels = np.argmax(labels, axis=1)

            batch_filename = os.path.join(dirpath, '{}_{}_batch_{}.{}'.format(datasetname, datasetType, i, "tfrecords"))
            self.writeMNIST(batch_filename, images, labels)
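The writeMNIST helper called above is not shown in these examples. A minimal sketch of how such a helper could serialize a batch with TensorFlow's TFRecord API, assuming float32 image arrays and integer labels; the feature names 'image' and 'label' are assumptions:

    import tensorflow as tf

    def writeMNIST(filename, images, labels):
        # Sketch only: serialize each (image, label) pair as a tf.train.Example.
        with tf.io.TFRecordWriter(filename) as writer:
            for image, label in zip(images, labels):
                example = tf.train.Example(features=tf.train.Features(feature={
                    'image': tf.train.Feature(
                        bytes_list=tf.train.BytesList(value=[image.tobytes()])),
                    'label': tf.train.Feature(
                        int64_list=tf.train.Int64List(value=[int(label)])),
                }))
                writer.write(example.SerializeToString())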
Example #6
    def cifar_write_records(self, cifarReader, tfrecordsdir, datasetname,
                            datasetType):

        print("Creating dir for saving tf records!")
        dirpath = util.create_dir(os.path.join(tfrecordsdir, datasetType))

        print("Starting processing {} images".format(datasetType))
        batchSize = WRITE_BATCH_SIZE
        # Use ceiling division so an exact multiple of batchSize does not
        # produce an empty trailing batch.
        numExamples = cifarReader.getNumOfExamples(datasetType)
        totalBatches = (numExamples + batchSize - 1) // batchSize

        for i in range(totalBatches):

            print("Writing batch {}/{}".format(i + 1, totalBatches))
            currentBatch = cifarReader.getData(datasetType)

            images = currentBatch[0][i * batchSize:(i + 1) * batchSize].astype(np.float32)
            images = self.resize_images_cifar(images)
            labels = currentBatch[1][i * batchSize:(i + 1) * batchSize]

            batch_filename = os.path.join(
                dirpath,
                '{}_{}_batch_{}.{}'.format(datasetname, datasetType, i, "tfrecords"))
            self.writeCIFAR(batch_filename, images, labels)
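Reading the written files back is not covered by these examples. A short sketch using tf.data, assuming the 'image'/'label' schema from the writeMNIST sketch above; the real schema depends on what writeCIFAR actually serializes:

    import tensorflow as tf

    feature_spec = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }

    def parse_example(serialized):
        parsed = tf.io.parse_single_example(serialized, feature_spec)
        image = tf.io.decode_raw(parsed['image'], tf.float32)
        return image, parsed['label']

    # The filename is illustrative; it follows the batch naming pattern used above.
    dataset = tf.data.TFRecordDataset('cifar_train_batch_0.tfrecords').map(parse_example)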
Example #7
    def start_cifar_writer(self):

        util.create_dir(CIFAR_DATASET_DIR)
        util.create_dir(CIFAR_TFRECORDS_DIR)

        dataset_types = config.config['dataset_types']
        classmapfile = os.path.join(CIFAR_TFRECORDS_DIR, 'cifar_classmap.txt')
        cifarClassesFile = os.path.join(CIFAR_DATASET_DIR, 'cifar_classes.txt')

        if not util.isDirOrFileExist(classmapfile):
            cifarClasses = util.cifarClassmap(cifarClassesFile)
            util.writeClassmapFile(classmapfile, cifarClasses)

        cifarReader = CifarReader()

        for datasetType in dataset_types:
            self.cifar_write_records(cifarReader, CIFAR_TFRECORDS_DIR, 'cifar', datasetType)
Example #8
    def start_mnist_writer(self):

        util.create_dir(MNIST_DATASET_DIR)
        util.create_dir(MNIST_TFRECORDS_DIR)

        dataset_types = config.config['dataset_types']
        classmapfile = os.path.join(MNIST_TFRECORDS_DIR, 'mnist_classmap.txt')

        if not util.isDirOrFileExist(classmapfile):
            mnistClasses = util.mnistClassmap(numAsKeys=False)
            util.writeClassmapFile(classmapfile, mnistClasses)

        mnistReader = MnistReader()

        for datasetType in dataset_types:
            self.mnist_write_records(mnistReader, MNIST_TFRECORDS_DIR, 'mnist',
                                     datasetType)
Example #9
    def start_lrw_writer(self):

        util.create_dir(LRW_DATASET_DIR)
        util.create_dir(LRW_TFRECORDS_DIR)

        dataset_types = config.config['dataset_types']
        classmapfile = os.path.join(LRW_TFRECORDS_DIR, 'lrw_classmap.txt')

        if util.isDirOrFileExist(classmapfile):
            lrwClasses = util.readClassmapFile(classmapfile, numsAsKeys=False)
        else:
            lrwClasses = util.lrwWordsToNumbers(LRW_DATASET_DIR)
            util.writeClassmapFile(classmapfile, lrwClasses)

        lrwReader = LRWReader()

        for datasetType in dataset_types:
            lrwMap = lrwReader.readLRWtoMap(LRW_DATASET_DIR, datasetType)
            self.lrw_write_records(lrwMap, lrwClasses, 'lrw', datasetType)
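util.lrwWordsToNumbers is not shown. Since the LRW dataset ships one directory per word class, a plausible sketch is to map sorted directory names to consecutive indices; this is an assumption about the helper, not its actual implementation:

    import os

    def lrwWordsToNumbers(dataset_dir):
        # Map each word directory (one per LRW class) to a class index.
        words = sorted(d for d in os.listdir(dataset_dir)
                       if os.path.isdir(os.path.join(dataset_dir, d)))
        return {word: idx for idx, word in enumerate(words)}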
Example #10
    def lrw_write_records(self, videoMap, videoClassMap, datasetname,
                          dataset_type):

        print("Creating dir for saving tf records!")
        dirpath = util.create_dir(os.path.join(LRW_TFRECORDS_DIR,
                                               dataset_type))

        print("Clearing already written items!")
        videoMap = self.clear_already_written(dirpath, videoMap)

        print("Shuffling data!")
        videoList = list(videoMap.items())
        shuffle(videoList)

        print("Starting processing {} images".format(dataset_type))
        batchSize = WRITE_BATCH_SIZE
        batchIndexer = self.get_number_of_written_tfrecords(dirpath)
        # Ceiling division: don't count an empty trailing batch in the
        # progress total.
        totalBatches = (len(videoList) + batchSize - 1) // batchSize
        batchCounter = 0

        while len(videoList) > 0:

            print("Writing batch {}/{}".format(batchCounter + 1, totalBatches))
            currentBatch = videoList[:batchSize]
            # Slicing past the end yields [], so no explicit length check is needed.
            videoList = videoList[batchSize:]

            batchWithImagesList = []
            for video in currentBatch:
                resizedVideos = len(batchWithImagesList)
                # Progress message every 100 videos.
                if resizedVideos % 100 == 0:
                    print("Videos read and resized: {}/{}".format(
                        resizedVideos, len(currentBatch)))
                images = util.video_to_images(video[0])
                images = self.resize_images_lrw(images)
                batchWithImagesList.append(
                    (video[0], video[1], videoClassMap[video[1]], images))

            batch_filename = os.path.join(
                dirpath, '{}_{}_batch_{}.{}'.format(datasetname, dataset_type,
                                                    batchIndexer, "tfrecords"))
            self.writeLRW(batch_filename, batchWithImagesList, dataset_type)

            batch_stat_filename = os.path.join(
                dirpath, '{}_{}_batch_{}.{}'.format(datasetname, dataset_type,
                                                    batchIndexer, "txt"))
            self.write_written_images(batch_stat_filename, batchWithImagesList)

            batchIndexer += 1
            batchCounter += 1
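The resume helpers used above (get_number_of_written_tfrecords, clear_already_written) are not shown. Hypothetical sketches: the first seeds batchIndexer so new files continue after those already on disk, and the second is written assuming the per-batch .txt stat files list one video path per line:

    import glob
    import os

    def get_number_of_written_tfrecords(dirpath):
        # Count the .tfrecords batches already written to this directory.
        return len(glob.glob(os.path.join(dirpath, '*.tfrecords')))

    def clear_already_written(dirpath, videoMap):
        # Collect every path recorded in the per-batch stat files (assumed
        # format: one video path per line), then drop those from the map.
        written = set()
        for statfile in glob.glob(os.path.join(dirpath, '*.txt')):
            with open(statfile) as f:
                written.update(line.strip() for line in f)
        return {path: label for path, label in videoMap.items()
                if path not in written}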
Example #11
    return jsonify({
        'brand': brand,
        'batch': batch,
        'tempRegion1': pred[0],
        'tempRegion2': pred[1],
        'time': time,
        'version': '1',
        'deviceStatus': 'deviceStatus'
    })


@app.route('/api/load_model_config')
def api_load_model_config():
    stage = request.args.get("stage")
    if stage == 'produce':
        return jsonify({'window_size': FEATURE_RANGE, 'block_size': int(FEATURE_RANGE / SPLIT_NUM)})
    elif stage == 'transition':
        return jsonify({'window_size': TRANSITION_FEATURE_RANGE, 'block_size': int(TRANSITION_FEATURE_RANGE / TRANSITION_SPLIT_NUM)})
    else:
        raise Exception('param error')


if __name__ == '__main__':
    create_dir(MODEL_SAVE_DIR)

    model_produce.load(MODEL_SAVE_DIR + load_best_model_prefix('produce'))
    model_transition.load(MODEL_SAVE_DIR + load_best_model_prefix('transition'))

    app.run(host='0.0.0.0')
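A quick way to exercise the /api/load_model_config endpoint once the server is running; host and port assume Flask's default of 5000 on localhost:

    import requests

    resp = requests.get('http://localhost:5000/api/load_model_config',
                        params={'stage': 'produce'})
    print(resp.json())  # e.g. {'window_size': ..., 'block_size': ...}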