# Imports assumed by the excerpts below; the project-specific helpers
# (IAugmentor, AspectAwarePreprocessor, HDF5DatasetWriterClassification,
# HDF5DatasetWriterSegmentation, readAndGenerateImage and
# readAndGenerateImageSegmentation) are defined elsewhere in the library.
import os
import random

import cv2
import numpy as np
import progressbar
from imutils import paths
from sklearn.preprocessing import LabelEncoder


def applyAugmentation(self, passes=np.inf):
        epochs = 0
        aap = AspectAwarePreprocessor(self.width, self.height)
        while epochs < passes:

            for i in np.arange(0, self.numImages, self.batchSize):
                imagPaths = self.imagePaths[i:i + self.batchSize]
                labPaths = self.labelPaths[i:i + self.batchSize]
                images = [
                    aap.preprocess(cv2.imread(imagePath))
                    for imagePath in imagPaths
                ]
                labels = [
                    aap.preprocess(cv2.imread(labelPath))
                    for labelPath in labPaths
                ]
                images_labels = [
                    readAndGenerateImageSegmentation(image, label,
                                                     self.generators)
                    for (image, label) in zip(images, labels)
                ]
                images = [i[0] for i in images_labels]
                labels = [i[1] for i in images_labels]
                yield (images, labels)

            epochs += 1
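
# A minimal sketch (as comments) of how a segmentation batch generator like the
# method above might be consumed; the augmentor object, its construction and the
# training call are illustrative assumptions, not part of the excerpt:
#
#   batch_generator = augmentor.applyAugmentation(passes=1)
#   for (images, labels) in batch_generator:
#       # images and labels are lists of preprocessed arrays of shape (height, width, 3)
#       model.train_on_batch(np.array(images), np.array(labels))  # hypothetical Keras-style step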
class HDF5LinearClassificationAugmentor:

    # All images must have same width and height
    def __init__(self,inputPath,parameters):
        IAugmentor.__init__(self)
        self.inputPath = inputPath
        # output path represents the h5py file where dataset will be stored
        if parameters["outputPath"]:
            self.outputPath = parameters["outputPath"]
        else:
            raise ValueError("You should provide an output path in the parameters")

        self.generators = []
        if parameters["width"]:
            self.width = parameters["width"]
        else:
            raise ValueError("You should provide a width in the parameters")
        if parameters["height"]:
            self.width = parameters["height"]
        else:
            raise ValueError("You should provide a height in the parameters")

        self.aw = AspectAwarePreprocessor(self.width,self.height)

    def addGenerator(self, generator):
        self.generators.append(generator)

    def readImagesAndAnnotations(self):
        self.imagePaths = list(paths.list_images(self.inputPath))


    def applyAugmentation(self):
        self.readImagesAndAnnotations()
        le = LabelEncoder()
        labels = [p.split(os.path.sep)[-2] for p in self.imagePaths]
        labels = le.fit_transform(labels)
        writer = HDF5DatasetWriterClassification((len(self.imagePaths)*len(self.generators),self.width,self.height,3),
                                   self.outputPath)
        # Store the class labels obtained from the label encoder in the HDF5 file
        writer.storeClassLabels(le.classes_)
        widgets = ["Processing images: ", progressbar.Percentage(), " ",
                   progressbar.Bar(), " ", progressbar.ETA()]
        pbar = progressbar.ProgressBar(maxval=len(self.imagePaths),
                                       widgets=widgets).start()
        for (i, (imagePath, label)) in enumerate(zip(self.imagePaths, labels)):
            image = cv2.imread(imagePath)
            image = self.aw.preprocess(image)
            for (j, generator) in enumerate(self.generators):
                newimage = generator.applyForClassification(image)
                newimage = self.aw.preprocess(newimage)
                writer.add([newimage],[label])
            pbar.update(i)
        writer.close()
        pbar.finish()
    def applyAugmentation(self,passes=np.inf):
        epochs = 0
        aap = AspectAwarePreprocessor(self.width,self.height)
        # numpy image arrays are indexed as (height, width, channels)
        batch_features = np.zeros((self.batchSize, self.height, self.width, 3))
        batch_labels = np.zeros((self.batchSize, self.classes))
        while epochs < passes:

            for i in np.arange(0, self.numImages, self.batchSize):
                imagPaths = self.imagePaths[i:i+self.batchSize]
                labels = self.labels[i:i+self.batchSize]
                images = [cv2.imread(imagePath) for imagePath in imagPaths]
                images = [aap.preprocess(readAndGenerateImage(image,self.generators)) for image in images]
                for j in range(self.batchSize):

                    index = random.randint(0,len(images)-1)
                    batch_features[j] = images[index]
                    batch_labels[j] = labels[index]
                yield (batch_features,batch_labels)


            epochs += 1
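
# A minimal usage sketch (as comments) for HDF5LinearClassificationAugmentor,
# assuming an input folder with one subfolder per class and a generator object
# supplied by the surrounding library; the paths, sizes and the flip_generator
# name are illustrative assumptions:
#
#   augmentor = HDF5LinearClassificationAugmentor(
#       "datasets/flowers/",
#       {"outputPath": "augmented_flowers.h5", "width": 224, "height": 224})
#   augmentor.addGenerator(flip_generator)  # any object exposing applyForClassification(image)
#   augmentor.applyAugmentation()           # writes the augmented images and labels to the HDF5 file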
class HDF5PowerSegmentationAugmentor:

    # All images must have same width and height
    def __init__(self, inputPath, parameters):
        IAugmentor.__init__(self)
        self.inputPath = inputPath
        self.imagesPath = inputPath + "images/"
        self.labelsPath = inputPath + "labels/"
        # output path represents the folder where the images will be stored
        if parameters["outputPath"]:
            self.outputPath = parameters["outputPath"]
        else:
            raise ValueError(
                "You should provide an output path in the parameters")

        self.generators = []
        if parameters["width"]:
            self.width = parameters["width"]
        else:
            raise ValueError("You should provide a width in the parameters")
        if parameters["height"]:
            self.width = parameters["height"]
        else:
            raise ValueError("You should provide a height in the parameters")
        if parameters["labelsExtension"]:
            self.labelsExtension = parameters["labelsExtension"]
        else:
            self.labelsExtension = ".tiff"
        self.aw = AspectAwarePreprocessor(self.width, self.height)

    def addGenerator(self, generator):
        self.generators.append(generator)

    def readImagesAndAnnotations(self):
        self.imagePaths = list(
            paths.list_files(self.imagesPath,
                             validExts=(".jpg", ".jpeg", ".png", ".bmp",
                                        ".tiff", ".tif")))
        self.labelPaths = list(
            paths.list_files(self.labelsPath,
                             validExts=(".jpg", ".jpeg", ".png", ".bmp",
                                        ".tiff", ".tif")))
        if (len(self.imagePaths) != len(self.labelPaths)):
            raise Exception(
                "The number of files is different in the folder of images and in the folder of labels"
            )

    def applyAugmentation(self):
        self.readImagesAndAnnotations()
        widgets = [
            "Processing images: ",
            progressbar.Percentage(), " ",
            progressbar.Bar(), " ",
            progressbar.ETA()
        ]

        pbar = progressbar.ProgressBar(maxval=len(self.imagePaths),
                                       widgets=widgets).start()
        writer = HDF5DatasetWriterSegmentation(
            (len(self.imagePaths) *
             (2**(len(self.generators) - 1)), self.width, self.height, 3),
            self.outputPath)

        for (i, imagePath) in enumerate(self.imagePaths):
            image = cv2.imread(imagePath)
            image = self.aw.preprocess(image)
            name = imagePath.split(os.path.sep)[-1]
            labelPath = '/'.join(
                imagePath.split(os.path.sep)[:-2]
            ) + "/labels/" + name[0:name.rfind(".")] + self.labelsExtension
            label = cv2.imread(labelPath)
            label = self.aw.preprocess(label)
            images = [image]
            labels = [label]
            for (j, generator) in enumerate(self.generators):

                newimages = []
                newlabels = []
                for (k, (im, la)) in enumerate(zip(images, labels)):
                    (newimage,
                     newlabel) = generator.applyForSegmentation(im, la)
                    newimage = self.aw.preprocess(newimage)
                    newlabel = self.aw.preprocess(newlabel)
                    writer.add([newimage], [newlabel])
                    newimages.append(newimage)
                    newlabels.append(newlabel)
                images = newimages
                labels = newlabels

            pbar.update(i)
        writer.close()
        pbar.finish()
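
# The segmentation augmentor above pairs each image with its label by rebuilding
# the path under the sibling "labels/" folder; a small self-contained sketch of
# that derivation (the example path and the ".tiff" extension are hypothetical,
# and the split assumes a POSIX path separator):
if __name__ == "__main__":
    exampleImagePath = "dataset/images/photo_001.png"
    name = exampleImagePath.split(os.path.sep)[-1]
    exampleLabelPath = ('/'.join(exampleImagePath.split(os.path.sep)[:-2])
                        + "/labels/" + name[0:name.rfind(".")] + ".tiff")
    print(exampleLabelPath)  # dataset/labels/photo_001.tiff on a POSIX system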